1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77
78 #define BNXT_TX_TIMEOUT (5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
80 NETIF_MSG_TX_ERR)
81
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88
89 #define BNXT_TX_PUSH_THRESH 164
90
91 /* indexed by enum board_idx */
92 static const struct {
93 char *name;
94 } board_info[] = {
95 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 };
146
147 static const struct pci_device_id bnxt_pci_tbl[] = {
148 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
149 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
150 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
151 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
153 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
154 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
155 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
156 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
157 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
158 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
159 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
160 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
161 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
162 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
163 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
164 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
165 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
166 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
167 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
168 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
170 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
171 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
172 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
175 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
182 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
183 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
184 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
185 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
186 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
187 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
188 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
189 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
190 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
197 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
198 #ifdef CONFIG_BNXT_SRIOV
199 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
200 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
201 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
203 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
205 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
206 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
208 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
212 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
215 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
216 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
217 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
218 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
219 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
220 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
221 #endif
222 { 0 }
223 };
224
225 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
226
227 static const u16 bnxt_vf_req_snif[] = {
228 HWRM_FUNC_CFG,
229 HWRM_FUNC_VF_CFG,
230 HWRM_PORT_PHY_QCFG,
231 HWRM_CFA_L2_FILTER_ALLOC,
232 };
233
234 static const u16 bnxt_async_events_arr[] = {
235 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
237 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
238 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
239 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
241 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
242 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
244 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
245 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
246 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
247 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
248 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
249 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
250 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
251 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
252 };
253
254 const u16 bnxt_bstore_to_trace[] = {
255 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
256 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
257 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
258 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
259 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
260 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
261 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
262 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
263 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
264 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
265 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
266 };
267
268 static struct workqueue_struct *bnxt_pf_wq;
269
270 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
271 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
272 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
273
274 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
275 .ports = {
276 .src = 0,
277 .dst = 0,
278 },
279 .addrs = {
280 .v6addrs = {
281 .src = BNXT_IPV6_MASK_NONE,
282 .dst = BNXT_IPV6_MASK_NONE,
283 },
284 },
285 };
286
287 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
288 .ports = {
289 .src = cpu_to_be16(0xffff),
290 .dst = cpu_to_be16(0xffff),
291 },
292 .addrs = {
293 .v6addrs = {
294 .src = BNXT_IPV6_MASK_ALL,
295 .dst = BNXT_IPV6_MASK_ALL,
296 },
297 },
298 };
299
300 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
301 .ports = {
302 .src = cpu_to_be16(0xffff),
303 .dst = cpu_to_be16(0xffff),
304 },
305 .addrs = {
306 .v4addrs = {
307 .src = cpu_to_be32(0xffffffff),
308 .dst = cpu_to_be32(0xffffffff),
309 },
310 },
311 };
312
313 static bool bnxt_vf_pciid(enum board_idx idx)
314 {
315 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
316 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
317 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
318 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
319 }
320
321 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
322 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
323
324 #define BNXT_DB_CQ(db, idx) \
325 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
326
327 #define BNXT_DB_NQ_P5(db, idx) \
328 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
329 (db)->doorbell)
330
331 #define BNXT_DB_NQ_P7(db, idx) \
332 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
333 DB_RING_IDX(db, idx), (db)->doorbell)
334
335 #define BNXT_DB_CQ_ARM(db, idx) \
336 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
337
338 #define BNXT_DB_NQ_ARM_P5(db, idx) \
339 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
340 DB_RING_IDX(db, idx), (db)->doorbell)
341
342 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
343 {
344 if (bp->flags & BNXT_FLAG_CHIP_P7)
345 BNXT_DB_NQ_P7(db, idx);
346 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
347 BNXT_DB_NQ_P5(db, idx);
348 else
349 BNXT_DB_CQ(db, idx);
350 }
351
352 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
353 {
354 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
355 BNXT_DB_NQ_ARM_P5(db, idx);
356 else
357 BNXT_DB_CQ_ARM(db, idx);
358 }
359
360 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
361 {
362 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
363 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
364 DB_RING_IDX(db, idx), db->doorbell);
365 else
366 BNXT_DB_CQ(db, idx);
367 }
368
369 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
370 {
371 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
372 return;
373
374 if (BNXT_PF(bp))
375 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
376 else
377 schedule_delayed_work(&bp->fw_reset_task, delay);
378 }
379
380 static void __bnxt_queue_sp_work(struct bnxt *bp)
381 {
382 if (BNXT_PF(bp))
383 queue_work(bnxt_pf_wq, &bp->sp_task);
384 else
385 schedule_work(&bp->sp_task);
386 }
387
388 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
389 {
390 set_bit(event, &bp->sp_event);
391 __bnxt_queue_sp_work(bp);
392 }
393
394 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
395 {
396 if (!rxr->bnapi->in_reset) {
397 rxr->bnapi->in_reset = true;
398 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
399 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
400 else
401 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
402 __bnxt_queue_sp_work(bp);
403 }
404 rxr->rx_next_cons = 0xffff;
405 }
406
407 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
408 u16 curr)
409 {
410 struct bnxt_napi *bnapi = txr->bnapi;
411
412 if (bnapi->tx_fault)
413 return;
414
415 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
416 txr->txq_index, txr->tx_hw_cons,
417 txr->tx_cons, txr->tx_prod, curr);
418 WARN_ON_ONCE(1);
419 bnapi->tx_fault = 1;
420 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
421 }
422
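/* TX BD length hint flags, indexed by packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()).
 */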
423 const u16 bnxt_lhint_arr[] = {
424 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
425 TX_BD_FLAGS_LHINT_512_TO_1023,
426 TX_BD_FLAGS_LHINT_1024_TO_2047,
427 TX_BD_FLAGS_LHINT_1024_TO_2047,
428 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
429 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
430 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
431 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
432 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 };
444
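/* Return the CFA action (HW port mux id) carried in the skb's metadata dst,
 * or 0 if none is attached.
 */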
445 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
446 {
447 struct metadata_dst *md_dst = skb_metadata_dst(skb);
448
449 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
450 return 0;
451
452 return md_dst->u.port_info.port_id;
453 }
454
455 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
456 u16 prod)
457 {
458 /* Sync BD data before updating doorbell */
459 wmb();
460 bnxt_db_write(bp, &txr->tx_db, prod);
461 txr->kick_pending = 0;
462 }
463
464 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
465 {
466 struct bnxt *bp = netdev_priv(dev);
467 struct tx_bd *txbd, *txbd0;
468 struct tx_bd_ext *txbd1;
469 struct netdev_queue *txq;
470 int i;
471 dma_addr_t mapping;
472 unsigned int length, pad = 0;
473 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
474 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
475 struct pci_dev *pdev = bp->pdev;
476 u16 prod, last_frag, txts_prod;
477 struct bnxt_tx_ring_info *txr;
478 struct bnxt_sw_tx_bd *tx_buf;
479 __le32 lflags = 0;
480 skb_frag_t *frag;
481
482 i = skb_get_queue_mapping(skb);
483 if (unlikely(i >= bp->tx_nr_rings)) {
484 dev_kfree_skb_any(skb);
485 dev_core_stats_tx_dropped_inc(dev);
486 return NETDEV_TX_OK;
487 }
488
489 txq = netdev_get_tx_queue(dev, i);
490 txr = &bp->tx_ring[bp->tx_ring_map[i]];
491 prod = txr->tx_prod;
492
493 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
494 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
495 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
496 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
497 if (skb_linearize(skb)) {
498 dev_kfree_skb_any(skb);
499 dev_core_stats_tx_dropped_inc(dev);
500 return NETDEV_TX_OK;
501 }
502 }
503 #endif
504 free_size = bnxt_tx_avail(bp, txr);
505 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
506 /* We must have raced with NAPI cleanup */
507 if (net_ratelimit() && txr->kick_pending)
508 netif_warn(bp, tx_err, dev,
509 "bnxt: ring busy w/ flush pending!\n");
510 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
511 bp->tx_wake_thresh))
512 return NETDEV_TX_BUSY;
513 }
514
515 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
516 goto tx_free;
517
518 length = skb->len;
519 len = skb_headlen(skb);
520 last_frag = skb_shinfo(skb)->nr_frags;
521
522 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
523
524 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
525 tx_buf->skb = skb;
526 tx_buf->nr_frags = last_frag;
527
528 vlan_tag_flags = 0;
529 cfa_action = bnxt_xmit_get_cfa_action(skb);
530 if (skb_vlan_tag_present(skb)) {
531 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
532 skb_vlan_tag_get(skb);
533 /* Currently supports 802.1Q and 802.1AD VLAN offloads.
534 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
535 */
536 if (skb->vlan_proto == htons(ETH_P_8021Q))
537 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
538 }
539
540 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
541 ptp->tx_tstamp_en) {
542 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
543 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
544 tx_buf->is_ts_pkt = 1;
545 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
546 } else if (!skb_is_gso(skb)) {
547 u16 seq_id, hdr_off;
548
549 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
550 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
551 if (vlan_tag_flags)
552 hdr_off += VLAN_HLEN;
553 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
554 tx_buf->is_ts_pkt = 1;
555 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
556
557 ptp->txts_req[txts_prod].tx_seqid = seq_id;
558 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
559 tx_buf->txts_prod = txts_prod;
560 }
561 }
562 }
563 if (unlikely(skb->no_fcs))
564 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
565
566 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
567 skb_frags_readable(skb) && !lflags) {
568 struct tx_push_buffer *tx_push_buf = txr->tx_push;
569 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
570 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
571 void __iomem *db = txr->tx_db.doorbell;
572 void *pdata = tx_push_buf->data;
573 u64 *end;
574 int j, push_len;
575
576 /* Set COAL_NOW to be ready quickly for the next push */
577 tx_push->tx_bd_len_flags_type =
578 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
579 TX_BD_TYPE_LONG_TX_BD |
580 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
581 TX_BD_FLAGS_COAL_NOW |
582 TX_BD_FLAGS_PACKET_END |
583 TX_BD_CNT(2));
584
585 if (skb->ip_summed == CHECKSUM_PARTIAL)
586 tx_push1->tx_bd_hsize_lflags =
587 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
588 else
589 tx_push1->tx_bd_hsize_lflags = 0;
590
591 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
592 tx_push1->tx_bd_cfa_action =
593 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
594
595 end = pdata + length;
596 end = PTR_ALIGN(end, 8) - 1;
597 *end = 0;
598
599 skb_copy_from_linear_data(skb, pdata, len);
600 pdata += len;
601 for (j = 0; j < last_frag; j++) {
602 void *fptr;
603
604 frag = &skb_shinfo(skb)->frags[j];
605 fptr = skb_frag_address_safe(frag);
606 if (!fptr)
607 goto normal_tx;
608
609 memcpy(pdata, fptr, skb_frag_size(frag));
610 pdata += skb_frag_size(frag);
611 }
612
613 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
614 txbd->tx_bd_haddr = txr->data_mapping;
615 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
616 prod = NEXT_TX(prod);
617 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
618 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
619 memcpy(txbd, tx_push1, sizeof(*txbd));
620 prod = NEXT_TX(prod);
621 tx_push->doorbell =
622 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
623 DB_RING_IDX(&txr->tx_db, prod));
624 WRITE_ONCE(txr->tx_prod, prod);
625
626 tx_buf->is_push = 1;
627 netdev_tx_sent_queue(txq, skb->len);
628 wmb(); /* Sync is_push and byte queue before pushing data */
629
630 push_len = (length + sizeof(*tx_push) + 7) / 8;
631 if (push_len > 16) {
632 __iowrite64_copy(db, tx_push_buf, 16);
633 __iowrite32_copy(db + 4, tx_push_buf + 1,
634 (push_len - 16) << 1);
635 } else {
636 __iowrite64_copy(db, tx_push_buf, push_len);
637 }
638
639 goto tx_done;
640 }
641
642 normal_tx:
643 if (length < BNXT_MIN_PKT_SIZE) {
644 pad = BNXT_MIN_PKT_SIZE - length;
645 if (skb_pad(skb, pad))
646 /* SKB already freed. */
647 goto tx_kick_pending;
648 length = BNXT_MIN_PKT_SIZE;
649 }
650
651 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
652
653 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
654 goto tx_free;
655
656 dma_unmap_addr_set(tx_buf, mapping, mapping);
657 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
658 TX_BD_CNT(last_frag + 2);
659
660 txbd->tx_bd_haddr = cpu_to_le64(mapping);
661 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
662
663 prod = NEXT_TX(prod);
664 txbd1 = (struct tx_bd_ext *)
665 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
666
667 txbd1->tx_bd_hsize_lflags = lflags;
668 if (skb_is_gso(skb)) {
669 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
670 u32 hdr_len;
671
672 if (skb->encapsulation) {
673 if (udp_gso)
674 hdr_len = skb_inner_transport_offset(skb) +
675 sizeof(struct udphdr);
676 else
677 hdr_len = skb_inner_tcp_all_headers(skb);
678 } else if (udp_gso) {
679 hdr_len = skb_transport_offset(skb) +
680 sizeof(struct udphdr);
681 } else {
682 hdr_len = skb_tcp_all_headers(skb);
683 }
684
685 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
686 TX_BD_FLAGS_T_IPID |
687 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
688 length = skb_shinfo(skb)->gso_size;
689 txbd1->tx_bd_mss = cpu_to_le32(length);
690 length += hdr_len;
691 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
692 txbd1->tx_bd_hsize_lflags |=
693 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
694 txbd1->tx_bd_mss = 0;
695 }
696
697 length >>= 9;
698 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
699 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
700 skb->len);
701 i = 0;
702 goto tx_dma_error;
703 }
704 flags |= bnxt_lhint_arr[length];
705 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
706
707 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
708 txbd1->tx_bd_cfa_action =
709 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
710 txbd0 = txbd;
711 for (i = 0; i < last_frag; i++) {
712 frag = &skb_shinfo(skb)->frags[i];
713 prod = NEXT_TX(prod);
714 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
715
716 len = skb_frag_size(frag);
717 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
718 DMA_TO_DEVICE);
719
720 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
721 goto tx_dma_error;
722
723 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
724 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
725 mapping, mapping);
726
727 txbd->tx_bd_haddr = cpu_to_le64(mapping);
728
729 flags = len << TX_BD_LEN_SHIFT;
730 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
731 }
732
733 flags &= ~TX_BD_LEN;
734 txbd->tx_bd_len_flags_type =
735 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
736 TX_BD_FLAGS_PACKET_END);
737
738 netdev_tx_sent_queue(txq, skb->len);
739
740 skb_tx_timestamp(skb);
741
742 prod = NEXT_TX(prod);
743 WRITE_ONCE(txr->tx_prod, prod);
744
745 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
746 bnxt_txr_db_kick(bp, txr, prod);
747 } else {
748 if (free_size >= bp->tx_wake_thresh)
749 txbd0->tx_bd_len_flags_type |=
750 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
751 txr->kick_pending = 1;
752 }
753
754 tx_done:
755
756 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
757 if (netdev_xmit_more() && !tx_buf->is_push) {
758 txbd0->tx_bd_len_flags_type &=
759 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
760 bnxt_txr_db_kick(bp, txr, prod);
761 }
762
763 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
764 bp->tx_wake_thresh);
765 }
766 return NETDEV_TX_OK;
767
768 tx_dma_error:
769 last_frag = i;
770
771 /* start back at beginning and unmap skb */
772 prod = txr->tx_prod;
773 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
774 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
775 skb_headlen(skb), DMA_TO_DEVICE);
776 prod = NEXT_TX(prod);
777
778 /* unmap remaining mapped pages */
779 for (i = 0; i < last_frag; i++) {
780 prod = NEXT_TX(prod);
781 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
782 frag = &skb_shinfo(skb)->frags[i];
783 netmem_dma_unmap_page_attrs(&pdev->dev,
784 dma_unmap_addr(tx_buf, mapping),
785 skb_frag_size(frag),
786 DMA_TO_DEVICE, 0);
787 }
788
789 tx_free:
790 dev_kfree_skb_any(skb);
791 tx_kick_pending:
792 if (BNXT_TX_PTP_IS_SET(lflags)) {
793 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
794 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
795 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
796 /* set SKB to err so PTP worker will clean up */
797 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
798 }
799 if (txr->kick_pending)
800 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
801 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
802 dev_core_stats_tx_dropped_inc(dev);
803 return NETDEV_TX_OK;
804 }
805
806 /* Returns true if some TX packets remain unprocessed. */
807 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
808 int budget)
809 {
810 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
811 struct pci_dev *pdev = bp->pdev;
812 u16 hw_cons = txr->tx_hw_cons;
813 unsigned int tx_bytes = 0;
814 u16 cons = txr->tx_cons;
815 skb_frag_t *frag;
816 int tx_pkts = 0;
817 bool rc = false;
818
819 while (RING_TX(bp, cons) != hw_cons) {
820 struct bnxt_sw_tx_bd *tx_buf;
821 struct sk_buff *skb;
822 bool is_ts_pkt;
823 int j, last;
824
825 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
826 skb = tx_buf->skb;
827
828 if (unlikely(!skb)) {
829 bnxt_sched_reset_txr(bp, txr, cons);
830 return rc;
831 }
832
833 is_ts_pkt = tx_buf->is_ts_pkt;
834 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
835 rc = true;
836 break;
837 }
838
839 cons = NEXT_TX(cons);
840 tx_pkts++;
841 tx_bytes += skb->len;
842 tx_buf->skb = NULL;
843 tx_buf->is_ts_pkt = 0;
844
845 if (tx_buf->is_push) {
846 tx_buf->is_push = 0;
847 goto next_tx_int;
848 }
849
850 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
851 skb_headlen(skb), DMA_TO_DEVICE);
852 last = tx_buf->nr_frags;
853
854 for (j = 0; j < last; j++) {
855 frag = &skb_shinfo(skb)->frags[j];
856 cons = NEXT_TX(cons);
857 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
858 netmem_dma_unmap_page_attrs(&pdev->dev,
859 dma_unmap_addr(tx_buf,
860 mapping),
861 skb_frag_size(frag),
862 DMA_TO_DEVICE, 0);
863 }
864 if (unlikely(is_ts_pkt)) {
865 if (BNXT_CHIP_P5(bp)) {
866 /* PTP worker takes ownership of the skb */
867 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
868 skb = NULL;
869 }
870 }
871
872 next_tx_int:
873 cons = NEXT_TX(cons);
874
875 dev_consume_skb_any(skb);
876 }
877
878 WRITE_ONCE(txr->tx_cons, cons);
879
880 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
881 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
882 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
883
884 return rc;
885 }
886
887 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
888 {
889 struct bnxt_tx_ring_info *txr;
890 bool more = false;
891 int i;
892
893 bnxt_for_each_napi_tx(i, bnapi, txr) {
894 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
895 more |= __bnxt_tx_int(bp, txr, budget);
896 }
897 if (!more)
898 bnapi->events &= ~BNXT_TX_CMP_EVENT;
899 }
900
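/* A separate head page pool is used when explicitly requested or when the
 * system page size exceeds BNXT_RX_PAGE_SIZE.
 */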
901 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
902 {
903 return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
904 }
905
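/* Allocate an RX buffer page (or a page fragment when PAGE_SIZE is larger
 * than BNXT_RX_PAGE_SIZE) from the page pool and return its DMA address.
 */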
906 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
907 struct bnxt_rx_ring_info *rxr,
908 unsigned int *offset,
909 gfp_t gfp)
910 {
911 struct page *page;
912
913 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
914 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
915 BNXT_RX_PAGE_SIZE);
916 } else {
917 page = page_pool_dev_alloc_pages(rxr->page_pool);
918 *offset = 0;
919 }
920 if (!page)
921 return NULL;
922
923 *mapping = page_pool_get_dma_addr(page) + *offset;
924 return page;
925 }
926
927 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
928 struct bnxt_rx_ring_info *rxr,
929 gfp_t gfp)
930 {
931 netmem_ref netmem;
932
933 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
934 if (!netmem)
935 return 0;
936
937 *mapping = page_pool_get_dma_addr_netmem(netmem);
938 return netmem;
939 }
940
941 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
942 struct bnxt_rx_ring_info *rxr,
943 gfp_t gfp)
944 {
945 unsigned int offset;
946 struct page *page;
947
948 page = page_pool_alloc_frag(rxr->head_pool, &offset,
949 bp->rx_buf_size, gfp);
950 if (!page)
951 return NULL;
952
953 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
954 return page_address(page) + offset;
955 }
956
957 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
958 u16 prod, gfp_t gfp)
959 {
960 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
961 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
962 dma_addr_t mapping;
963
964 if (BNXT_RX_PAGE_MODE(bp)) {
965 unsigned int offset;
966 struct page *page =
967 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
968
969 if (!page)
970 return -ENOMEM;
971
972 mapping += bp->rx_dma_offset;
973 rx_buf->data = page;
974 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
975 } else {
976 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
977
978 if (!data)
979 return -ENOMEM;
980
981 rx_buf->data = data;
982 rx_buf->data_ptr = data + bp->rx_offset;
983 }
984 rx_buf->mapping = mapping;
985
986 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
987 return 0;
988 }
989
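/* Recycle the RX buffer at 'cons' back into the ring at the current
 * producer index.
 */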
990 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
991 {
992 u16 prod = rxr->rx_prod;
993 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
994 struct bnxt *bp = rxr->bnapi->bp;
995 struct rx_bd *cons_bd, *prod_bd;
996
997 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
998 cons_rx_buf = &rxr->rx_buf_ring[cons];
999
1000 prod_rx_buf->data = data;
1001 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1002
1003 prod_rx_buf->mapping = cons_rx_buf->mapping;
1004
1005 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1006 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1007
1008 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1009 }
1010
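/* Find the next free slot in the aggregation buffer bitmap, wrapping to the
 * start if nothing is free at or after 'idx'.
 */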
1011 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1012 {
1013 u16 next, max = rxr->rx_agg_bmap_size;
1014
1015 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1016 if (next >= max)
1017 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1018 return next;
1019 }
1020
1021 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1022 u16 prod, gfp_t gfp)
1023 {
1024 struct rx_bd *rxbd =
1025 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1026 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1027 u16 sw_prod = rxr->rx_sw_agg_prod;
1028 unsigned int offset = 0;
1029 dma_addr_t mapping;
1030 netmem_ref netmem;
1031
1032 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
1033 if (!netmem)
1034 return -ENOMEM;
1035
1036 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1037 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1038
1039 __set_bit(sw_prod, rxr->rx_agg_bmap);
1040 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1041 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1042
1043 rx_agg_buf->netmem = netmem;
1044 rx_agg_buf->offset = offset;
1045 rx_agg_buf->mapping = mapping;
1046 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1047 rxbd->rx_bd_opaque = sw_prod;
1048 return 0;
1049 }
1050
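/* Return the RX aggregation completion located 'curr' entries past 'cp_cons'
 * on the completion ring.
 */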
1051 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1052 struct bnxt_cp_ring_info *cpr,
1053 u16 cp_cons, u16 curr)
1054 {
1055 struct rx_agg_cmp *agg;
1056
1057 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1058 agg = (struct rx_agg_cmp *)
1059 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1060 return agg;
1061 }
1062
1063 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1064 struct bnxt_rx_ring_info *rxr,
1065 u16 agg_id, u16 curr)
1066 {
1067 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1068
1069 return &tpa_info->agg_arr[curr];
1070 }
1071
1072 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1073 u16 start, u32 agg_bufs, bool tpa)
1074 {
1075 struct bnxt_napi *bnapi = cpr->bnapi;
1076 struct bnxt *bp = bnapi->bp;
1077 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1078 u16 prod = rxr->rx_agg_prod;
1079 u16 sw_prod = rxr->rx_sw_agg_prod;
1080 bool p5_tpa = false;
1081 u32 i;
1082
1083 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1084 p5_tpa = true;
1085
1086 for (i = 0; i < agg_bufs; i++) {
1087 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1088 struct rx_agg_cmp *agg;
1089 struct rx_bd *prod_bd;
1090 netmem_ref netmem;
1091 u16 cons;
1092
1093 if (p5_tpa)
1094 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1095 else
1096 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1097 cons = agg->rx_agg_cmp_opaque;
1098 __clear_bit(cons, rxr->rx_agg_bmap);
1099
1100 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1101 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1102
1103 __set_bit(sw_prod, rxr->rx_agg_bmap);
1104 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1105 cons_rx_buf = &rxr->rx_agg_ring[cons];
1106
1107 /* It is possible for sw_prod to be equal to cons, so
1108 * set cons_rx_buf->netmem to 0 first.
1109 */
1110 netmem = cons_rx_buf->netmem;
1111 cons_rx_buf->netmem = 0;
1112 prod_rx_buf->netmem = netmem;
1113 prod_rx_buf->offset = cons_rx_buf->offset;
1114
1115 prod_rx_buf->mapping = cons_rx_buf->mapping;
1116
1117 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1118
1119 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1120 prod_bd->rx_bd_opaque = sw_prod;
1121
1122 prod = NEXT_RX_AGG(prod);
1123 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1124 }
1125 rxr->rx_agg_prod = prod;
1126 rxr->rx_sw_agg_prod = sw_prod;
1127 }
1128
1129 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1130 struct bnxt_rx_ring_info *rxr,
1131 u16 cons, void *data, u8 *data_ptr,
1132 dma_addr_t dma_addr,
1133 unsigned int offset_and_len)
1134 {
1135 unsigned int len = offset_and_len & 0xffff;
1136 struct page *page = data;
1137 u16 prod = rxr->rx_prod;
1138 struct sk_buff *skb;
1139 int err;
1140
1141 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1142 if (unlikely(err)) {
1143 bnxt_reuse_rx_data(rxr, cons, data);
1144 return NULL;
1145 }
1146 dma_addr -= bp->rx_dma_offset;
1147 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1148 bp->rx_dir);
1149 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1150 if (!skb) {
1151 page_pool_recycle_direct(rxr->page_pool, page);
1152 return NULL;
1153 }
1154 skb_mark_for_recycle(skb);
1155 skb_reserve(skb, bp->rx_offset);
1156 __skb_put(skb, len);
1157
1158 return skb;
1159 }
1160
1161 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1162 struct bnxt_rx_ring_info *rxr,
1163 u16 cons, void *data, u8 *data_ptr,
1164 dma_addr_t dma_addr,
1165 unsigned int offset_and_len)
1166 {
1167 unsigned int payload = offset_and_len >> 16;
1168 unsigned int len = offset_and_len & 0xffff;
1169 skb_frag_t *frag;
1170 struct page *page = data;
1171 u16 prod = rxr->rx_prod;
1172 struct sk_buff *skb;
1173 int off, err;
1174
1175 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1176 if (unlikely(err)) {
1177 bnxt_reuse_rx_data(rxr, cons, data);
1178 return NULL;
1179 }
1180 dma_addr -= bp->rx_dma_offset;
1181 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1182 bp->rx_dir);
1183
1184 if (unlikely(!payload))
1185 payload = eth_get_headlen(bp->dev, data_ptr, len);
1186
1187 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1188 if (!skb) {
1189 page_pool_recycle_direct(rxr->page_pool, page);
1190 return NULL;
1191 }
1192
1193 skb_mark_for_recycle(skb);
1194 off = (void *)data_ptr - page_address(page);
1195 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1196 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1197 payload + NET_IP_ALIGN);
1198
1199 frag = &skb_shinfo(skb)->frags[0];
1200 skb_frag_size_sub(frag, payload);
1201 skb_frag_off_add(frag, payload);
1202 skb->data_len -= payload;
1203 skb->tail += payload;
1204
1205 return skb;
1206 }
1207
1208 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1209 struct bnxt_rx_ring_info *rxr, u16 cons,
1210 void *data, u8 *data_ptr,
1211 dma_addr_t dma_addr,
1212 unsigned int offset_and_len)
1213 {
1214 u16 prod = rxr->rx_prod;
1215 struct sk_buff *skb;
1216 int err;
1217
1218 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1219 if (unlikely(err)) {
1220 bnxt_reuse_rx_data(rxr, cons, data);
1221 return NULL;
1222 }
1223
1224 skb = napi_build_skb(data, bp->rx_buf_size);
1225 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1226 bp->rx_dir);
1227 if (!skb) {
1228 page_pool_free_va(rxr->head_pool, data, true);
1229 return NULL;
1230 }
1231
1232 skb_mark_for_recycle(skb);
1233 skb_reserve(skb, bp->rx_offset);
1234 skb_put(skb, offset_and_len & 0xffff);
1235 return skb;
1236 }
1237
1238 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1239 struct bnxt_cp_ring_info *cpr,
1240 u16 idx, u32 agg_bufs, bool tpa,
1241 struct sk_buff *skb,
1242 struct xdp_buff *xdp)
1243 {
1244 struct bnxt_napi *bnapi = cpr->bnapi;
1245 struct skb_shared_info *shinfo;
1246 struct bnxt_rx_ring_info *rxr;
1247 u32 i, total_frag_len = 0;
1248 bool p5_tpa = false;
1249 u16 prod;
1250
1251 rxr = bnapi->rx_ring;
1252 prod = rxr->rx_agg_prod;
1253
1254 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1255 p5_tpa = true;
1256
1257 if (skb)
1258 shinfo = skb_shinfo(skb);
1259 else
1260 shinfo = xdp_get_shared_info_from_buff(xdp);
1261
1262 for (i = 0; i < agg_bufs; i++) {
1263 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1264 struct rx_agg_cmp *agg;
1265 u16 cons, frag_len;
1266 netmem_ref netmem;
1267
1268 if (p5_tpa)
1269 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1270 else
1271 agg = bnxt_get_agg(bp, cpr, idx, i);
1272 cons = agg->rx_agg_cmp_opaque;
1273 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1274 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1275
1276 cons_rx_buf = &rxr->rx_agg_ring[cons];
1277 if (skb) {
1278 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1279 cons_rx_buf->offset,
1280 frag_len, BNXT_RX_PAGE_SIZE);
1281 } else {
1282 skb_frag_t *frag = &shinfo->frags[i];
1283
1284 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1285 cons_rx_buf->offset,
1286 frag_len);
1287 shinfo->nr_frags = i + 1;
1288 }
1289 __clear_bit(cons, rxr->rx_agg_bmap);
1290
1291 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1292 * a sw_prod index that equals the cons index, so we
1293 * need to clear the cons entry now.
1294 */
1295 netmem = cons_rx_buf->netmem;
1296 cons_rx_buf->netmem = 0;
1297
1298 if (xdp && netmem_is_pfmemalloc(netmem))
1299 xdp_buff_set_frag_pfmemalloc(xdp);
1300
1301 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1302 if (skb) {
1303 skb->len -= frag_len;
1304 skb->data_len -= frag_len;
1305 skb->truesize -= BNXT_RX_PAGE_SIZE;
1306 }
1307
1308 --shinfo->nr_frags;
1309 cons_rx_buf->netmem = netmem;
1310
1311 /* Update prod since possibly some netmems have been
1312 * allocated already.
1313 */
1314 rxr->rx_agg_prod = prod;
1315 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1316 return 0;
1317 }
1318
1319 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1320 BNXT_RX_PAGE_SIZE);
1321
1322 total_frag_len += frag_len;
1323 prod = NEXT_RX_AGG(prod);
1324 }
1325 rxr->rx_agg_prod = prod;
1326 return total_frag_len;
1327 }
1328
1329 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1330 struct bnxt_cp_ring_info *cpr,
1331 struct sk_buff *skb, u16 idx,
1332 u32 agg_bufs, bool tpa)
1333 {
1334 u32 total_frag_len = 0;
1335
1336 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1337 skb, NULL);
1338 if (!total_frag_len) {
1339 skb_mark_for_recycle(skb);
1340 dev_kfree_skb(skb);
1341 return NULL;
1342 }
1343
1344 return skb;
1345 }
1346
1347 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1348 struct bnxt_cp_ring_info *cpr,
1349 struct xdp_buff *xdp, u16 idx,
1350 u32 agg_bufs, bool tpa)
1351 {
1352 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1353 u32 total_frag_len = 0;
1354
1355 if (!xdp_buff_has_frags(xdp))
1356 shinfo->nr_frags = 0;
1357
1358 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1359 NULL, xdp);
1360 if (total_frag_len) {
1361 xdp_buff_set_frags_flag(xdp);
1362 shinfo->nr_frags = agg_bufs;
1363 shinfo->xdp_frags_size = total_frag_len;
1364 }
1365 return total_frag_len;
1366 }
1367
1368 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1369 u8 agg_bufs, u32 *raw_cons)
1370 {
1371 u16 last;
1372 struct rx_agg_cmp *agg;
1373
1374 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1375 last = RING_CMP(*raw_cons);
1376 agg = (struct rx_agg_cmp *)
1377 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1378 return RX_AGG_CMP_VALID(agg, *raw_cons);
1379 }
1380
1381 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1382 unsigned int len,
1383 dma_addr_t mapping)
1384 {
1385 struct bnxt *bp = bnapi->bp;
1386 struct pci_dev *pdev = bp->pdev;
1387 struct sk_buff *skb;
1388
1389 skb = napi_alloc_skb(&bnapi->napi, len);
1390 if (!skb)
1391 return NULL;
1392
1393 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1394 bp->rx_dir);
1395
1396 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1397 len + NET_IP_ALIGN);
1398
1399 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1400 bp->rx_dir);
1401
1402 skb_put(skb, len);
1403
1404 return skb;
1405 }
1406
1407 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1408 unsigned int len,
1409 dma_addr_t mapping)
1410 {
1411 return bnxt_copy_data(bnapi, data, len, mapping);
1412 }
1413
1414 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1415 struct xdp_buff *xdp,
1416 unsigned int len,
1417 dma_addr_t mapping)
1418 {
1419 unsigned int metasize = 0;
1420 u8 *data = xdp->data;
1421 struct sk_buff *skb;
1422
1423 len = xdp->data_end - xdp->data_meta;
1424 metasize = xdp->data - xdp->data_meta;
1425 data = xdp->data_meta;
1426
1427 skb = bnxt_copy_data(bnapi, data, len, mapping);
1428 if (!skb)
1429 return skb;
1430
1431 if (metasize) {
1432 skb_metadata_set(skb, metasize);
1433 __skb_pull(skb, metasize);
1434 }
1435
1436 return skb;
1437 }
1438
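/* Skip over an RX completion (and its aggregation completions) that is being
 * dropped.  Returns -EBUSY if the aggregation entries have not all arrived on
 * the completion ring yet.
 */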
1439 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1440 u32 *raw_cons, void *cmp)
1441 {
1442 struct rx_cmp *rxcmp = cmp;
1443 u32 tmp_raw_cons = *raw_cons;
1444 u8 cmp_type, agg_bufs = 0;
1445
1446 cmp_type = RX_CMP_TYPE(rxcmp);
1447
1448 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1449 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1450 RX_CMP_AGG_BUFS) >>
1451 RX_CMP_AGG_BUFS_SHIFT;
1452 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1453 struct rx_tpa_end_cmp *tpa_end = cmp;
1454
1455 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1456 return 0;
1457
1458 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1459 }
1460
1461 if (agg_bufs) {
1462 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1463 return -EBUSY;
1464 }
1465 *raw_cons = tmp_raw_cons;
1466 return 0;
1467 }
1468
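/* Map a hardware TPA agg_id to a free software index and record the mapping. */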
1469 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1470 {
1471 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1472 u16 idx = agg_id & MAX_TPA_P5_MASK;
1473
1474 if (test_bit(idx, map->agg_idx_bmap))
1475 idx = find_first_zero_bit(map->agg_idx_bmap,
1476 BNXT_AGG_IDX_BMAP_SIZE);
1477 __set_bit(idx, map->agg_idx_bmap);
1478 map->agg_id_tbl[agg_id] = idx;
1479 return idx;
1480 }
1481
1482 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1483 {
1484 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1485
1486 __clear_bit(idx, map->agg_idx_bmap);
1487 }
1488
1489 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1490 {
1491 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1492
1493 return map->agg_id_tbl[agg_id];
1494 }
1495
1496 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1497 struct rx_tpa_start_cmp *tpa_start,
1498 struct rx_tpa_start_cmp_ext *tpa_start1)
1499 {
1500 tpa_info->cfa_code_valid = 1;
1501 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1502 tpa_info->vlan_valid = 0;
1503 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1504 tpa_info->vlan_valid = 1;
1505 tpa_info->metadata =
1506 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1507 }
1508 }
1509
1510 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1511 struct rx_tpa_start_cmp *tpa_start,
1512 struct rx_tpa_start_cmp_ext *tpa_start1)
1513 {
1514 tpa_info->vlan_valid = 0;
1515 if (TPA_START_VLAN_VALID(tpa_start)) {
1516 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1517 u32 vlan_proto = ETH_P_8021Q;
1518
1519 tpa_info->vlan_valid = 1;
1520 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1521 vlan_proto = ETH_P_8021AD;
1522 tpa_info->metadata = vlan_proto << 16 |
1523 TPA_START_METADATA0_TCI(tpa_start1);
1524 }
1525 }
1526
1527 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1528 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1529 struct rx_tpa_start_cmp_ext *tpa_start1)
1530 {
1531 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1532 struct bnxt_tpa_info *tpa_info;
1533 u16 cons, prod, agg_id;
1534 struct rx_bd *prod_bd;
1535 dma_addr_t mapping;
1536
1537 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1538 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1539 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1540 } else {
1541 agg_id = TPA_START_AGG_ID(tpa_start);
1542 }
1543 cons = tpa_start->rx_tpa_start_cmp_opaque;
1544 prod = rxr->rx_prod;
1545 cons_rx_buf = &rxr->rx_buf_ring[cons];
1546 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1547 tpa_info = &rxr->rx_tpa[agg_id];
1548
1549 if (unlikely(cons != rxr->rx_next_cons ||
1550 TPA_START_ERROR(tpa_start))) {
1551 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1552 cons, rxr->rx_next_cons,
1553 TPA_START_ERROR_CODE(tpa_start1));
1554 bnxt_sched_reset_rxr(bp, rxr);
1555 return;
1556 }
1557 prod_rx_buf->data = tpa_info->data;
1558 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1559
1560 mapping = tpa_info->mapping;
1561 prod_rx_buf->mapping = mapping;
1562
1563 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1564
1565 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1566
1567 tpa_info->data = cons_rx_buf->data;
1568 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1569 cons_rx_buf->data = NULL;
1570 tpa_info->mapping = cons_rx_buf->mapping;
1571
1572 tpa_info->len =
1573 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1574 RX_TPA_START_CMP_LEN_SHIFT;
1575 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1576 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1577 tpa_info->gso_type = SKB_GSO_TCPV4;
1578 if (TPA_START_IS_IPV6(tpa_start1))
1579 tpa_info->gso_type = SKB_GSO_TCPV6;
1580 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1581 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1582 TPA_START_HASH_TYPE(tpa_start) == 3)
1583 tpa_info->gso_type = SKB_GSO_TCPV6;
1584 tpa_info->rss_hash =
1585 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1586 } else {
1587 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1588 tpa_info->gso_type = 0;
1589 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1590 }
1591 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1592 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1593 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1594 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1595 else
1596 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1597 tpa_info->agg_count = 0;
1598
1599 rxr->rx_prod = NEXT_RX(prod);
1600 cons = RING_RX(bp, NEXT_RX(cons));
1601 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1602 cons_rx_buf = &rxr->rx_buf_ring[cons];
1603
1604 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1605 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1606 cons_rx_buf->data = NULL;
1607 }
1608
1609 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1610 {
1611 if (agg_bufs)
1612 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1613 }
1614
1615 #ifdef CONFIG_INET
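/* For a tunneled aggregation, mark the skb with the matching UDP tunnel GSO
 * type.  Only IPv4/IPv6 outer headers carrying UDP are considered;
 * SKB_GSO_UDP_TUNNEL_CSUM is used when the outer UDP checksum is non-zero.
 */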
1616 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1617 {
1618 struct udphdr *uh = NULL;
1619
1620 if (ip_proto == htons(ETH_P_IP)) {
1621 struct iphdr *iph = (struct iphdr *)skb->data;
1622
1623 if (iph->protocol == IPPROTO_UDP)
1624 uh = (struct udphdr *)(iph + 1);
1625 } else {
1626 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1627
1628 if (iph->nexthdr == IPPROTO_UDP)
1629 uh = (struct udphdr *)(iph + 1);
1630 }
1631 if (uh) {
1632 if (uh->check)
1633 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1634 else
1635 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1636 }
1637 }
1638 #endif
1639
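/* GRO completion fixup for 5731x-class chips: derive the inner network and
 * transport header offsets from the TPA hdr_info, compensate for the extra
 * 4-byte offset of internal loopback packets, and recompute the TCP
 * pseudo-header checksum so tcp_gro_complete() can finish the packet.
 * Tunneled packets are additionally tagged via bnxt_gro_tunnel().
 */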
1640 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1641 int payload_off, int tcp_ts,
1642 struct sk_buff *skb)
1643 {
1644 #ifdef CONFIG_INET
1645 struct tcphdr *th;
1646 int len, nw_off;
1647 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1648 u32 hdr_info = tpa_info->hdr_info;
1649 bool loopback = false;
1650
1651 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1652 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1653 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1654
1655 /* If the packet is an internal loopback packet, the offsets will
1656 * have an extra 4 bytes.
1657 */
1658 if (inner_mac_off == 4) {
1659 loopback = true;
1660 } else if (inner_mac_off > 4) {
1661 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1662 ETH_HLEN - 2));
1663
1664 /* We only support inner IPv4/IPv6. If we don't see the
1665 * correct protocol ID, it must be a loopback packet where
1666 * the offsets are off by 4.
1667 */
1668 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1669 loopback = true;
1670 }
1671 if (loopback) {
1672 /* internal loopback packet, subtract all offsets by 4 */
1673 inner_ip_off -= 4;
1674 inner_mac_off -= 4;
1675 outer_ip_off -= 4;
1676 }
1677
1678 nw_off = inner_ip_off - ETH_HLEN;
1679 skb_set_network_header(skb, nw_off);
1680 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1681 struct ipv6hdr *iph = ipv6_hdr(skb);
1682
1683 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1684 len = skb->len - skb_transport_offset(skb);
1685 th = tcp_hdr(skb);
1686 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1687 } else {
1688 struct iphdr *iph = ip_hdr(skb);
1689
1690 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1691 len = skb->len - skb_transport_offset(skb);
1692 th = tcp_hdr(skb);
1693 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1694 }
1695
1696 if (inner_mac_off) { /* tunnel */
1697 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1698 ETH_HLEN - 2));
1699
1700 bnxt_gro_tunnel(skb, proto);
1701 }
1702 #endif
1703 return skb;
1704 }
1705
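/* GRO completion fixup for 5750x-class chips: only the inner network and
 * transport header offsets need to be set from the TPA hdr_info; no
 * checksum adjustment is done here.  Tunneled packets are tagged via
 * bnxt_gro_tunnel().
 */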
1706 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1707 int payload_off, int tcp_ts,
1708 struct sk_buff *skb)
1709 {
1710 #ifdef CONFIG_INET
1711 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1712 u32 hdr_info = tpa_info->hdr_info;
1713 int iphdr_len, nw_off;
1714
1715 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1716 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1717 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1718
1719 nw_off = inner_ip_off - ETH_HLEN;
1720 skb_set_network_header(skb, nw_off);
1721 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1722 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1723 skb_set_transport_header(skb, nw_off + iphdr_len);
1724
1725 if (inner_mac_off) { /* tunnel */
1726 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1727 ETH_HLEN - 2));
1728
1729 bnxt_gro_tunnel(skb, proto);
1730 }
1731 #endif
1732 return skb;
1733 }
1734
1735 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1736 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1737
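/* GRO completion fixup for 5730x-class chips: header offsets are computed
 * backwards from the payload offset (accounting for a 12-byte TCP timestamp
 * option when present), and the TCP pseudo-header checksum is recomputed.
 * Packets that are neither TCPv4 nor TCPv6 are dropped.
 */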
1738 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1739 int payload_off, int tcp_ts,
1740 struct sk_buff *skb)
1741 {
1742 #ifdef CONFIG_INET
1743 struct tcphdr *th;
1744 int len, nw_off, tcp_opt_len = 0;
1745
1746 if (tcp_ts)
1747 tcp_opt_len = 12;
1748
1749 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1750 struct iphdr *iph;
1751
1752 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1753 ETH_HLEN;
1754 skb_set_network_header(skb, nw_off);
1755 iph = ip_hdr(skb);
1756 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1757 len = skb->len - skb_transport_offset(skb);
1758 th = tcp_hdr(skb);
1759 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1760 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1761 struct ipv6hdr *iph;
1762
1763 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1764 ETH_HLEN;
1765 skb_set_network_header(skb, nw_off);
1766 iph = ipv6_hdr(skb);
1767 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1768 len = skb->len - skb_transport_offset(skb);
1769 th = tcp_hdr(skb);
1770 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1771 } else {
1772 dev_kfree_skb_any(skb);
1773 return NULL;
1774 }
1775
1776 if (nw_off) /* tunnel */
1777 bnxt_gro_tunnel(skb, skb->protocol);
1778 #endif
1779 return skb;
1780 }
1781
1782 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1783 struct bnxt_tpa_info *tpa_info,
1784 struct rx_tpa_end_cmp *tpa_end,
1785 struct rx_tpa_end_cmp_ext *tpa_end1,
1786 struct sk_buff *skb)
1787 {
1788 #ifdef CONFIG_INET
1789 int payload_off;
1790 u16 segs;
1791
1792 segs = TPA_END_TPA_SEGS(tpa_end);
1793 if (segs == 1)
1794 return skb;
1795
1796 NAPI_GRO_CB(skb)->count = segs;
1797 skb_shinfo(skb)->gso_size =
1798 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1799 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1800 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1801 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1802 else
1803 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1804 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1805 if (likely(skb))
1806 tcp_gro_complete(skb);
1807 #endif
1808 return skb;
1809 }
1810
1811 /* Given the cfa_code of a received packet, determine which
1812 * netdev (vf-rep or PF) the packet is destined for.
1813 */
1814 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1815 {
1816 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1817
1818 /* if vf-rep dev is NULL, it must belong to the PF */
1819 return dev ? dev : bp->dev;
1820 }
1821
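/* Handle a TPA end completion: validate the aggregation state, build an skb
 * for the coalesced packet (copying small packets below rx_copybreak,
 * otherwise re-arming the TPA slot with a freshly allocated buffer), attach
 * any aggregation fragments, apply VLAN, RSS hash and checksum metadata,
 * and finally run the GRO completion handler when hardware GRO was used.
 * Returns NULL when the packet is dropped, or ERR_PTR(-EBUSY) if the
 * completion ring does not yet hold all aggregation buffers.
 */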
1822 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1823 struct bnxt_cp_ring_info *cpr,
1824 u32 *raw_cons,
1825 struct rx_tpa_end_cmp *tpa_end,
1826 struct rx_tpa_end_cmp_ext *tpa_end1,
1827 u8 *event)
1828 {
1829 struct bnxt_napi *bnapi = cpr->bnapi;
1830 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1831 struct net_device *dev = bp->dev;
1832 u8 *data_ptr, agg_bufs;
1833 unsigned int len;
1834 struct bnxt_tpa_info *tpa_info;
1835 dma_addr_t mapping;
1836 struct sk_buff *skb;
1837 u16 idx = 0, agg_id;
1838 void *data;
1839 bool gro;
1840
1841 if (unlikely(bnapi->in_reset)) {
1842 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1843
1844 if (rc < 0)
1845 return ERR_PTR(-EBUSY);
1846 return NULL;
1847 }
1848
1849 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1850 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1851 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1852 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1853 tpa_info = &rxr->rx_tpa[agg_id];
1854 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1855 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1856 agg_bufs, tpa_info->agg_count);
1857 agg_bufs = tpa_info->agg_count;
1858 }
1859 tpa_info->agg_count = 0;
1860 *event |= BNXT_AGG_EVENT;
1861 bnxt_free_agg_idx(rxr, agg_id);
1862 idx = agg_id;
1863 gro = !!(bp->flags & BNXT_FLAG_GRO);
1864 } else {
1865 agg_id = TPA_END_AGG_ID(tpa_end);
1866 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1867 tpa_info = &rxr->rx_tpa[agg_id];
1868 idx = RING_CMP(*raw_cons);
1869 if (agg_bufs) {
1870 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1871 return ERR_PTR(-EBUSY);
1872
1873 *event |= BNXT_AGG_EVENT;
1874 idx = NEXT_CMP(idx);
1875 }
1876 gro = !!TPA_END_GRO(tpa_end);
1877 }
1878 data = tpa_info->data;
1879 data_ptr = tpa_info->data_ptr;
1880 prefetch(data_ptr);
1881 len = tpa_info->len;
1882 mapping = tpa_info->mapping;
1883
1884 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1885 bnxt_abort_tpa(cpr, idx, agg_bufs);
1886 if (agg_bufs > MAX_SKB_FRAGS)
1887 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1888 agg_bufs, (int)MAX_SKB_FRAGS);
1889 return NULL;
1890 }
1891
1892 if (len <= bp->rx_copybreak) {
1893 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1894 if (!skb) {
1895 bnxt_abort_tpa(cpr, idx, agg_bufs);
1896 cpr->sw_stats->rx.rx_oom_discards += 1;
1897 return NULL;
1898 }
1899 } else {
1900 u8 *new_data;
1901 dma_addr_t new_mapping;
1902
1903 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1904 GFP_ATOMIC);
1905 if (!new_data) {
1906 bnxt_abort_tpa(cpr, idx, agg_bufs);
1907 cpr->sw_stats->rx.rx_oom_discards += 1;
1908 return NULL;
1909 }
1910
1911 tpa_info->data = new_data;
1912 tpa_info->data_ptr = new_data + bp->rx_offset;
1913 tpa_info->mapping = new_mapping;
1914
1915 skb = napi_build_skb(data, bp->rx_buf_size);
1916 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1917 bp->rx_buf_use_size, bp->rx_dir);
1918
1919 if (!skb) {
1920 page_pool_free_va(rxr->head_pool, data, true);
1921 bnxt_abort_tpa(cpr, idx, agg_bufs);
1922 cpr->sw_stats->rx.rx_oom_discards += 1;
1923 return NULL;
1924 }
1925 skb_mark_for_recycle(skb);
1926 skb_reserve(skb, bp->rx_offset);
1927 skb_put(skb, len);
1928 }
1929
1930 if (agg_bufs) {
1931 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1932 true);
1933 if (!skb) {
1934 /* Page reuse already handled by bnxt_rx_pages(). */
1935 cpr->sw_stats->rx.rx_oom_discards += 1;
1936 return NULL;
1937 }
1938 }
1939
1940 if (tpa_info->cfa_code_valid)
1941 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1942 skb->protocol = eth_type_trans(skb, dev);
1943
1944 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1945 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1946
1947 if (tpa_info->vlan_valid &&
1948 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1949 __be16 vlan_proto = htons(tpa_info->metadata >>
1950 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1951 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1952
1953 if (eth_type_vlan(vlan_proto)) {
1954 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1955 } else {
1956 dev_kfree_skb(skb);
1957 return NULL;
1958 }
1959 }
1960
1961 skb_checksum_none_assert(skb);
1962 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1963 skb->ip_summed = CHECKSUM_UNNECESSARY;
1964 skb->csum_level =
1965 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1966 }
1967
1968 if (gro)
1969 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1970
1971 return skb;
1972 }
1973
1974 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1975 struct rx_agg_cmp *rx_agg)
1976 {
1977 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1978 struct bnxt_tpa_info *tpa_info;
1979
1980 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1981 tpa_info = &rxr->rx_tpa[agg_id];
1982 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1983 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1984 }
1985
1986 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1987 struct sk_buff *skb)
1988 {
1989 skb_mark_for_recycle(skb);
1990
1991 if (skb->dev != bp->dev) {
1992 /* this packet belongs to a vf-rep */
1993 bnxt_vf_rep_rx(bp, skb);
1994 return;
1995 }
1996 skb_record_rx_queue(skb, bnapi->index);
1997 napi_gro_receive(&bnapi->napi, skb);
1998 }
1999
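/* Extract the RX completion timestamp into *cmpl_ts.  The timestamp is
 * accepted when the per-packet valid bit is set, or, when timestamping of
 * all RX packets is enabled, when the all-packet valid bit is set and the
 * raw value is non-zero.
 */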
2000 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2001 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2002 {
2003 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2004
2005 if (BNXT_PTP_RX_TS_VALID(flags))
2006 goto ts_valid;
2007 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2008 return false;
2009
2010 ts_valid:
2011 *cmpl_ts = ts;
2012 return true;
2013 }
2014
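/* Apply hardware-stripped VLAN metadata to the skb.  For L2 completions the
 * TCI and TPID come from the flags2/meta_data words; for V3 completions
 * they come from the metadata1/metadata0 fields.  An unrecognized TPID
 * drops the packet and returns NULL.
 */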
2015 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2016 struct rx_cmp *rxcmp,
2017 struct rx_cmp_ext *rxcmp1)
2018 {
2019 __be16 vlan_proto;
2020 u16 vtag;
2021
2022 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2023 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2024 u32 meta_data;
2025
2026 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2027 return skb;
2028
2029 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2030 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2031 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2032 if (eth_type_vlan(vlan_proto))
2033 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2034 else
2035 goto vlan_err;
2036 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2037 if (RX_CMP_VLAN_VALID(rxcmp)) {
2038 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2039
2040 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2041 vlan_proto = htons(ETH_P_8021Q);
2042 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2043 vlan_proto = htons(ETH_P_8021AD);
2044 else
2045 goto vlan_err;
2046 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2047 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2048 }
2049 }
2050 return skb;
2051 vlan_err:
2052 skb_mark_for_recycle(skb);
2053 dev_kfree_skb(skb);
2054 return NULL;
2055 }
2056
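/* Map the RSS hash "extract op" reported in a V3 RX completion to a kernel
 * packet hash type: EXT_OP_INNER_4, EXT_OP_OUTER_4, EXT_OP_INNFL_3 and
 * EXT_OP_OUTFL_3 are reported as PKT_HASH_TYPE_L4, anything else as
 * PKT_HASH_TYPE_L3.
 */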
2057 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2058 struct rx_cmp *rxcmp)
2059 {
2060 u8 ext_op;
2061
2062 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2063 switch (ext_op) {
2064 case EXT_OP_INNER_4:
2065 case EXT_OP_OUTER_4:
2066 case EXT_OP_INNFL_3:
2067 case EXT_OP_OUTFL_3:
2068 return PKT_HASH_TYPE_L4;
2069 default:
2070 return PKT_HASH_TYPE_L3;
2071 }
2072 }
2073
2074 /* returns the following:
2075 * 1 - 1 packet successfully received
2076 * 0 - successful TPA_START, packet not completed yet
2077 * -EBUSY - completion ring does not have all the agg buffers yet
2078 * -ENOMEM - packet aborted due to out of memory
2079 * -EIO - packet aborted due to hw error indicated in BD
2080 */
2081 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2082 u32 *raw_cons, u8 *event)
2083 {
2084 struct bnxt_napi *bnapi = cpr->bnapi;
2085 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2086 struct net_device *dev = bp->dev;
2087 struct rx_cmp *rxcmp;
2088 struct rx_cmp_ext *rxcmp1;
2089 u32 tmp_raw_cons = *raw_cons;
2090 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2091 struct skb_shared_info *sinfo;
2092 struct bnxt_sw_rx_bd *rx_buf;
2093 unsigned int len;
2094 u8 *data_ptr, agg_bufs, cmp_type;
2095 bool xdp_active = false;
2096 dma_addr_t dma_addr;
2097 struct sk_buff *skb;
2098 struct xdp_buff xdp;
2099 u32 flags, misc;
2100 u32 cmpl_ts;
2101 void *data;
2102 int rc = 0;
2103
2104 rxcmp = (struct rx_cmp *)
2105 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2106
2107 cmp_type = RX_CMP_TYPE(rxcmp);
2108
2109 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2110 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2111 goto next_rx_no_prod_no_len;
2112 }
2113
2114 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2115 cp_cons = RING_CMP(tmp_raw_cons);
2116 rxcmp1 = (struct rx_cmp_ext *)
2117 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2118
2119 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2120 return -EBUSY;
2121
2122 /* The valid test of the entry must be done first before
2123 * reading any further.
2124 */
2125 dma_rmb();
2126 prod = rxr->rx_prod;
2127
2128 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2129 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2130 bnxt_tpa_start(bp, rxr, cmp_type,
2131 (struct rx_tpa_start_cmp *)rxcmp,
2132 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2133
2134 *event |= BNXT_RX_EVENT;
2135 goto next_rx_no_prod_no_len;
2136
2137 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2138 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2139 (struct rx_tpa_end_cmp *)rxcmp,
2140 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2141
2142 if (IS_ERR(skb))
2143 return -EBUSY;
2144
2145 rc = -ENOMEM;
2146 if (likely(skb)) {
2147 bnxt_deliver_skb(bp, bnapi, skb);
2148 rc = 1;
2149 }
2150 *event |= BNXT_RX_EVENT;
2151 goto next_rx_no_prod_no_len;
2152 }
2153
2154 cons = rxcmp->rx_cmp_opaque;
2155 if (unlikely(cons != rxr->rx_next_cons)) {
2156 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2157
2158 /* 0xffff is forced error, don't print it */
2159 if (rxr->rx_next_cons != 0xffff)
2160 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2161 cons, rxr->rx_next_cons);
2162 bnxt_sched_reset_rxr(bp, rxr);
2163 if (rc1)
2164 return rc1;
2165 goto next_rx_no_prod_no_len;
2166 }
2167 rx_buf = &rxr->rx_buf_ring[cons];
2168 data = rx_buf->data;
2169 data_ptr = rx_buf->data_ptr;
2170 prefetch(data_ptr);
2171
2172 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2173 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2174
2175 if (agg_bufs) {
2176 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2177 return -EBUSY;
2178
2179 cp_cons = NEXT_CMP(cp_cons);
2180 *event |= BNXT_AGG_EVENT;
2181 }
2182 *event |= BNXT_RX_EVENT;
2183
2184 rx_buf->data = NULL;
2185 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2186 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2187
2188 bnxt_reuse_rx_data(rxr, cons, data);
2189 if (agg_bufs)
2190 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2191 false);
2192
2193 rc = -EIO;
2194 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2195 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2196 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2197 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2198 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2199 rx_err);
2200 bnxt_sched_reset_rxr(bp, rxr);
2201 }
2202 }
2203 goto next_rx_no_len;
2204 }
2205
2206 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2207 len = flags >> RX_CMP_LEN_SHIFT;
2208 dma_addr = rx_buf->mapping;
2209
2210 if (bnxt_xdp_attached(bp, rxr)) {
2211 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2212 if (agg_bufs) {
2213 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2214 cp_cons,
2215 agg_bufs,
2216 false);
2217 if (!frag_len)
2218 goto oom_next_rx;
2219
2220 }
2221 xdp_active = true;
2222 }
2223
2224 if (xdp_active) {
2225 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2226 rc = 1;
2227 goto next_rx;
2228 }
2229 if (xdp_buff_has_frags(&xdp)) {
2230 sinfo = xdp_get_shared_info_from_buff(&xdp);
2231 agg_bufs = sinfo->nr_frags;
2232 } else {
2233 agg_bufs = 0;
2234 }
2235 }
2236
2237 if (len <= bp->rx_copybreak) {
2238 if (!xdp_active)
2239 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2240 else
2241 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2242 bnxt_reuse_rx_data(rxr, cons, data);
2243 if (!skb) {
2244 if (agg_bufs) {
2245 if (!xdp_active)
2246 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2247 agg_bufs, false);
2248 else
2249 bnxt_xdp_buff_frags_free(rxr, &xdp);
2250 }
2251 goto oom_next_rx;
2252 }
2253 } else {
2254 u32 payload;
2255
2256 if (rx_buf->data_ptr == data_ptr)
2257 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2258 else
2259 payload = 0;
2260 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2261 payload | len);
2262 if (!skb)
2263 goto oom_next_rx;
2264 }
2265
2266 if (agg_bufs) {
2267 if (!xdp_active) {
2268 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2269 agg_bufs, false);
2270 if (!skb)
2271 goto oom_next_rx;
2272 } else {
2273 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2274 rxr->page_pool, &xdp);
2275 if (!skb) {
2276 /* we should be able to free the old skb here */
2277 bnxt_xdp_buff_frags_free(rxr, &xdp);
2278 goto oom_next_rx;
2279 }
2280 }
2281 }
2282
2283 if (RX_CMP_HASH_VALID(rxcmp)) {
2284 enum pkt_hash_types type;
2285
2286 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2287 type = bnxt_rss_ext_op(bp, rxcmp);
2288 } else {
2289 u32 itypes = RX_CMP_ITYPES(rxcmp);
2290
2291 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2292 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2293 type = PKT_HASH_TYPE_L4;
2294 else
2295 type = PKT_HASH_TYPE_L3;
2296 }
2297 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2298 }
2299
2300 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2301 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2302 skb->protocol = eth_type_trans(skb, dev);
2303
2304 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2305 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2306 if (!skb)
2307 goto next_rx;
2308 }
2309
2310 skb_checksum_none_assert(skb);
2311 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2312 if (dev->features & NETIF_F_RXCSUM) {
2313 skb->ip_summed = CHECKSUM_UNNECESSARY;
2314 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2315 }
2316 } else {
2317 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2318 if (dev->features & NETIF_F_RXCSUM)
2319 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2320 }
2321 }
2322
2323 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2324 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2325 u64 ns, ts;
2326
2327 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2328 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2329
2330 ns = bnxt_timecounter_cyc2time(ptp, ts);
2331 memset(skb_hwtstamps(skb), 0,
2332 sizeof(*skb_hwtstamps(skb)));
2333 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2334 }
2335 }
2336 }
2337 bnxt_deliver_skb(bp, bnapi, skb);
2338 rc = 1;
2339
2340 next_rx:
2341 cpr->rx_packets += 1;
2342 cpr->rx_bytes += len;
2343
2344 next_rx_no_len:
2345 rxr->rx_prod = NEXT_RX(prod);
2346 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2347
2348 next_rx_no_prod_no_len:
2349 *raw_cons = tmp_raw_cons;
2350
2351 return rc;
2352
2353 oom_next_rx:
2354 cpr->sw_stats->rx.rx_oom_discards += 1;
2355 rc = -ENOMEM;
2356 goto next_rx;
2357 }
2358
2359 /* In netpoll mode, if we are using a combined completion ring, we need to
2360 * discard the rx packets and recycle the buffers.
2361 */
2362 static int bnxt_force_rx_discard(struct bnxt *bp,
2363 struct bnxt_cp_ring_info *cpr,
2364 u32 *raw_cons, u8 *event)
2365 {
2366 u32 tmp_raw_cons = *raw_cons;
2367 struct rx_cmp_ext *rxcmp1;
2368 struct rx_cmp *rxcmp;
2369 u16 cp_cons;
2370 u8 cmp_type;
2371 int rc;
2372
2373 cp_cons = RING_CMP(tmp_raw_cons);
2374 rxcmp = (struct rx_cmp *)
2375 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2376
2377 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2378 cp_cons = RING_CMP(tmp_raw_cons);
2379 rxcmp1 = (struct rx_cmp_ext *)
2380 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2381
2382 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2383 return -EBUSY;
2384
2385 /* The valid test of the entry must be done first before
2386 * reading any further.
2387 */
2388 dma_rmb();
2389 cmp_type = RX_CMP_TYPE(rxcmp);
2390 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2391 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2392 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2393 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2394 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2395 struct rx_tpa_end_cmp_ext *tpa_end1;
2396
2397 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2398 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2399 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2400 }
2401 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2402 if (rc && rc != -EBUSY)
2403 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2404 return rc;
2405 }
2406
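/* Read one of the firmware health registers described by the fw_health
 * register map.  Depending on the register type, the value comes from PCI
 * config space, from a GRC window remapped into BAR0, or directly from
 * BAR0/BAR1; the reset in-progress register is additionally masked with the
 * firmware-provided mask.
 */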
2407 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2408 {
2409 struct bnxt_fw_health *fw_health = bp->fw_health;
2410 u32 reg = fw_health->regs[reg_idx];
2411 u32 reg_type, reg_off, val = 0;
2412
2413 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2414 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2415 switch (reg_type) {
2416 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2417 pci_read_config_dword(bp->pdev, reg_off, &val);
2418 break;
2419 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2420 reg_off = fw_health->mapped_regs[reg_idx];
2421 fallthrough;
2422 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2423 val = readl(bp->bar0 + reg_off);
2424 break;
2425 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2426 val = readl(bp->bar1 + reg_off);
2427 break;
2428 }
2429 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2430 val &= fw_health->fw_reset_inprog_reg_mask;
2431 return val;
2432 }
2433
2434 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2435 {
2436 int i;
2437
2438 for (i = 0; i < bp->rx_nr_rings; i++) {
2439 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2440 struct bnxt_ring_grp_info *grp_info;
2441
2442 grp_info = &bp->grp_info[grp_idx];
2443 if (grp_info->agg_fw_ring_id == ring_id)
2444 return grp_idx;
2445 }
2446 return INVALID_HW_RING_ID;
2447 }
2448
2449 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2450 {
2451 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2452
2453 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2454 return link_info->force_link_speed2;
2455 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2456 return link_info->force_pam4_link_speed;
2457 return link_info->force_link_speed;
2458 }
2459
2460 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2461 {
2462 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2463
2464 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2465 link_info->req_link_speed = link_info->force_link_speed2;
2466 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2467 switch (link_info->req_link_speed) {
2468 case BNXT_LINK_SPEED_50GB_PAM4:
2469 case BNXT_LINK_SPEED_100GB_PAM4:
2470 case BNXT_LINK_SPEED_200GB_PAM4:
2471 case BNXT_LINK_SPEED_400GB_PAM4:
2472 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2473 break;
2474 case BNXT_LINK_SPEED_100GB_PAM4_112:
2475 case BNXT_LINK_SPEED_200GB_PAM4_112:
2476 case BNXT_LINK_SPEED_400GB_PAM4_112:
2477 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2478 break;
2479 default:
2480 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2481 }
2482 return;
2483 }
2484 link_info->req_link_speed = link_info->force_link_speed;
2485 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2486 if (link_info->force_pam4_link_speed) {
2487 link_info->req_link_speed = link_info->force_pam4_link_speed;
2488 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2489 }
2490 }
2491
2492 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2493 {
2494 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2495
2496 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2497 link_info->advertising = link_info->auto_link_speeds2;
2498 return;
2499 }
2500 link_info->advertising = link_info->auto_link_speeds;
2501 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2502 }
2503
2504 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2505 {
2506 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2507
2508 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2509 if (link_info->req_link_speed != link_info->force_link_speed2)
2510 return true;
2511 return false;
2512 }
2513 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2514 link_info->req_link_speed != link_info->force_link_speed)
2515 return true;
2516 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2517 link_info->req_link_speed != link_info->force_pam4_link_speed)
2518 return true;
2519 return false;
2520 }
2521
2522 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2523 {
2524 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2525
2526 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2527 if (link_info->advertising != link_info->auto_link_speeds2)
2528 return true;
2529 return false;
2530 }
2531 if (link_info->advertising != link_info->auto_link_speeds ||
2532 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2533 return true;
2534 return false;
2535 }
2536
2537 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2538 {
2539 u32 flags = bp->ctx->ctx_arr[type].flags;
2540
2541 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2542 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2543 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2544 }
2545
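/* Initialize bookkeeping for one firmware backing-store trace buffer: locate
 * the last byte of the last page of the context memory (walking the page
 * directory when the region spans more than MAX_CTX_PAGES) and write the
 * trace magic byte there, which appears to let bnxt_bs_trace_check_wrap()
 * detect buffer wrap-around later.  Multi-instance regions are skipped.
 */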
2546 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2547 {
2548 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2549 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2550 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2551 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2552 struct bnxt_bs_trace_info *bs_trace;
2553 int last_pg;
2554
2555 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2556 return;
2557
2558 mem_size = ctxm->max_entries * ctxm->entry_size;
2559 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2560 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2561
2562 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2563 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2564
2565 rmem = &ctx_pg[0].ring_mem;
2566 bs_trace = &bp->bs_trace[trace_type];
2567 bs_trace->ctx_type = ctxm->type;
2568 bs_trace->trace_type = trace_type;
2569 if (pages > MAX_CTX_PAGES) {
2570 int last_pg_dir = rmem->nr_pages - 1;
2571
2572 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2573 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2574 } else {
2575 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2576 }
2577 bs_trace->magic_byte += magic_byte_offset;
2578 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2579 }
2580
2581 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2582 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2583 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2584
2585 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2586 (((data2) & \
2587 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2588 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2589
2590 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2591 ((data2) & \
2592 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2593
2594 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2595 (((data2) & \
2596 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2597 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2598
2599 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2600 ((data1) & \
2601 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2602
2603 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2604 (((data1) & \
2605 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2606 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2607
2608 /* Return true if the workqueue has to be scheduled */
2609 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2610 {
2611 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2612
2613 switch (err_type) {
2614 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2615 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2616 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2617 break;
2618 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2619 netdev_warn(bp->dev, "Pause Storm detected!\n");
2620 break;
2621 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2622 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2623 break;
2624 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2625 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2626 char *threshold_type;
2627 bool notify = false;
2628 char *dir_str;
2629
2630 switch (type) {
2631 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2632 threshold_type = "warning";
2633 break;
2634 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2635 threshold_type = "critical";
2636 break;
2637 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2638 threshold_type = "fatal";
2639 break;
2640 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2641 threshold_type = "shutdown";
2642 break;
2643 default:
2644 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2645 return false;
2646 }
2647 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2648 dir_str = "above";
2649 notify = true;
2650 } else {
2651 dir_str = "below";
2652 }
2653 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2654 dir_str, threshold_type);
2655 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2656 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2657 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2658 if (notify) {
2659 bp->thermal_threshold_type = type;
2660 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2661 return true;
2662 }
2663 return false;
2664 }
2665 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2666 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2667 break;
2668 default:
2669 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2670 err_type);
2671 break;
2672 }
2673 return false;
2674 }
2675
2676 #define BNXT_GET_EVENT_PORT(data) \
2677 ((data) & \
2678 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2679
2680 #define BNXT_EVENT_RING_TYPE(data2) \
2681 ((data2) & \
2682 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2683
2684 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2685 (BNXT_EVENT_RING_TYPE(data2) == \
2686 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2687
2688 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2689 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2690 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2691
2692 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2693 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2694 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2695
2696 #define BNXT_PHC_BITS 48
2697
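/* Process one firmware asynchronous event completion.  Most events set a
 * bit in bp->sp_event and let __bnxt_queue_sp_work() run the slow-path
 * task; others (error recovery, PHC updates, debug buffer producers,
 * deferred responses) are handled inline.  All events are also forwarded to
 * the ULP layer via bnxt_ulp_async_events().
 */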
2698 static int bnxt_async_event_process(struct bnxt *bp,
2699 struct hwrm_async_event_cmpl *cmpl)
2700 {
2701 u16 event_id = le16_to_cpu(cmpl->event_id);
2702 u32 data1 = le32_to_cpu(cmpl->event_data1);
2703 u32 data2 = le32_to_cpu(cmpl->event_data2);
2704
2705 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2706 event_id, data1, data2);
2707
2708 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2709 switch (event_id) {
2710 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2711 struct bnxt_link_info *link_info = &bp->link_info;
2712
2713 if (BNXT_VF(bp))
2714 goto async_event_process_exit;
2715
2716 /* print unsupported speed warning in forced speed mode only */
2717 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2718 (data1 & 0x20000)) {
2719 u16 fw_speed = bnxt_get_force_speed(link_info);
2720 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2721
2722 if (speed != SPEED_UNKNOWN)
2723 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2724 speed);
2725 }
2726 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2727 }
2728 fallthrough;
2729 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2730 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2731 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2732 fallthrough;
2733 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2734 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2735 break;
2736 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2737 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2738 break;
2739 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2740 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2741
2742 if (BNXT_VF(bp))
2743 break;
2744
2745 if (bp->pf.port_id != port_id)
2746 break;
2747
2748 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2749 break;
2750 }
2751 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2752 if (BNXT_PF(bp))
2753 goto async_event_process_exit;
2754 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2755 break;
2756 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2757 char *type_str = "Solicited";
2758
2759 if (!bp->fw_health)
2760 goto async_event_process_exit;
2761
2762 bp->fw_reset_timestamp = jiffies;
2763 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2764 if (!bp->fw_reset_min_dsecs)
2765 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2766 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2767 if (!bp->fw_reset_max_dsecs)
2768 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2769 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2770 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2771 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2772 type_str = "Fatal";
2773 bp->fw_health->fatalities++;
2774 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2775 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2776 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2777 type_str = "Non-fatal";
2778 bp->fw_health->survivals++;
2779 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2780 }
2781 netif_warn(bp, hw, bp->dev,
2782 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2783 type_str, data1, data2,
2784 bp->fw_reset_min_dsecs * 100,
2785 bp->fw_reset_max_dsecs * 100);
2786 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2787 break;
2788 }
2789 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2790 struct bnxt_fw_health *fw_health = bp->fw_health;
2791 char *status_desc = "healthy";
2792 u32 status;
2793
2794 if (!fw_health)
2795 goto async_event_process_exit;
2796
2797 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2798 fw_health->enabled = false;
2799 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2800 break;
2801 }
2802 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2803 fw_health->tmr_multiplier =
2804 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2805 bp->current_interval * 10);
2806 fw_health->tmr_counter = fw_health->tmr_multiplier;
2807 if (!fw_health->enabled)
2808 fw_health->last_fw_heartbeat =
2809 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2810 fw_health->last_fw_reset_cnt =
2811 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2812 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2813 if (status != BNXT_FW_STATUS_HEALTHY)
2814 status_desc = "unhealthy";
2815 netif_info(bp, drv, bp->dev,
2816 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2817 fw_health->primary ? "primary" : "backup", status,
2818 status_desc, fw_health->last_fw_reset_cnt);
2819 if (!fw_health->enabled) {
2820 /* Make sure tmr_counter is set and visible to
2821 * bnxt_health_check() before setting enabled to true.
2822 */
2823 smp_wmb();
2824 fw_health->enabled = true;
2825 }
2826 goto async_event_process_exit;
2827 }
2828 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2829 netif_notice(bp, hw, bp->dev,
2830 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2831 data1, data2);
2832 goto async_event_process_exit;
2833 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2834 struct bnxt_rx_ring_info *rxr;
2835 u16 grp_idx;
2836
2837 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2838 goto async_event_process_exit;
2839
2840 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2841 BNXT_EVENT_RING_TYPE(data2), data1);
2842 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2843 goto async_event_process_exit;
2844
2845 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2846 if (grp_idx == INVALID_HW_RING_ID) {
2847 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2848 data1);
2849 goto async_event_process_exit;
2850 }
2851 rxr = bp->bnapi[grp_idx]->rx_ring;
2852 bnxt_sched_reset_rxr(bp, rxr);
2853 goto async_event_process_exit;
2854 }
2855 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2856 struct bnxt_fw_health *fw_health = bp->fw_health;
2857
2858 netif_notice(bp, hw, bp->dev,
2859 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2860 data1, data2);
2861 if (fw_health) {
2862 fw_health->echo_req_data1 = data1;
2863 fw_health->echo_req_data2 = data2;
2864 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2865 break;
2866 }
2867 goto async_event_process_exit;
2868 }
2869 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2870 bnxt_ptp_pps_event(bp, data1, data2);
2871 goto async_event_process_exit;
2872 }
2873 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2874 if (bnxt_event_error_report(bp, data1, data2))
2875 break;
2876 goto async_event_process_exit;
2877 }
2878 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2879 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2880 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2881 if (BNXT_PTP_USE_RTC(bp)) {
2882 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2883 unsigned long flags;
2884 u64 ns;
2885
2886 if (!ptp)
2887 goto async_event_process_exit;
2888
2889 bnxt_ptp_update_current_time(bp);
2890 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2891 BNXT_PHC_BITS) | ptp->current_time);
2892 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2893 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2894 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2895 }
2896 break;
2897 }
2898 goto async_event_process_exit;
2899 }
2900 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2901 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2902
2903 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2904 goto async_event_process_exit;
2905 }
2906 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2907 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2908 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2909
2910 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2911 goto async_event_process_exit;
2912 }
2913 default:
2914 goto async_event_process_exit;
2915 }
2916 __bnxt_queue_sp_work(bp);
2917 async_event_process_exit:
2918 bnxt_ulp_async_events(bp, cmpl);
2919 return 0;
2920 }
2921
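/* Dispatch an HWRM-related completion seen on a completion ring: a DONE
 * completion updates the matching request token, a forwarded VF request is
 * queued to the slow-path task after validating the VF id, and async event
 * completions are handed to bnxt_async_event_process().
 */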
2922 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2923 {
2924 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2925 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2926 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2927 (struct hwrm_fwd_req_cmpl *)txcmp;
2928
2929 switch (cmpl_type) {
2930 case CMPL_BASE_TYPE_HWRM_DONE:
2931 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2932 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2933 break;
2934
2935 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2936 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2937
2938 if ((vf_id < bp->pf.first_vf_id) ||
2939 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2940 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2941 vf_id);
2942 return -EINVAL;
2943 }
2944
2945 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2946 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2947 break;
2948
2949 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2950 bnxt_async_event_process(bp,
2951 (struct hwrm_async_event_cmpl *)txcmp);
2952 break;
2953
2954 default:
2955 break;
2956 }
2957
2958 return 0;
2959 }
2960
2961 static bool bnxt_vnic_is_active(struct bnxt *bp)
2962 {
2963 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2964
2965 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2966 }
2967
2968 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2969 {
2970 struct bnxt_napi *bnapi = dev_instance;
2971 struct bnxt *bp = bnapi->bp;
2972 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2973 u32 cons = RING_CMP(cpr->cp_raw_cons);
2974
2975 cpr->event_ctr++;
2976 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2977 napi_schedule(&bnapi->napi);
2978 return IRQ_HANDLED;
2979 }
2980
2981 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2982 {
2983 u32 raw_cons = cpr->cp_raw_cons;
2984 u16 cons = RING_CMP(raw_cons);
2985 struct tx_cmp *txcmp;
2986
2987 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2988
2989 return TX_CMP_VALID(txcmp, raw_cons);
2990 }
2991
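/* Core completion-ring poll loop: walk valid entries, record TX completions
 * in txr->tx_hw_cons, process RX/TPA completions (discarding them when
 * called with a zero budget, e.g. from netpoll), and dispatch HWRM
 * completions.  XDP redirects are flushed and any pending TX doorbell is
 * written before returning the number of RX packets processed;
 * has_more_work is set when the budget is exhausted.
 */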
2992 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2993 int budget)
2994 {
2995 struct bnxt_napi *bnapi = cpr->bnapi;
2996 u32 raw_cons = cpr->cp_raw_cons;
2997 bool flush_xdp = false;
2998 u32 cons;
2999 int rx_pkts = 0;
3000 u8 event = 0;
3001 struct tx_cmp *txcmp;
3002
3003 cpr->has_more_work = 0;
3004 cpr->had_work_done = 1;
3005 while (1) {
3006 u8 cmp_type;
3007 int rc;
3008
3009 cons = RING_CMP(raw_cons);
3010 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3011
3012 if (!TX_CMP_VALID(txcmp, raw_cons))
3013 break;
3014
3015 /* The valid test of the entry must be done first before
3016 * reading any further.
3017 */
3018 dma_rmb();
3019 cmp_type = TX_CMP_TYPE(txcmp);
3020 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3021 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3022 u32 opaque = txcmp->tx_cmp_opaque;
3023 struct bnxt_tx_ring_info *txr;
3024 u16 tx_freed;
3025
3026 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3027 event |= BNXT_TX_CMP_EVENT;
3028 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3029 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3030 else
3031 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3032 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3033 bp->tx_ring_mask;
3034 /* return full budget so NAPI will complete. */
3035 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3036 rx_pkts = budget;
3037 raw_cons = NEXT_RAW_CMP(raw_cons);
3038 if (budget)
3039 cpr->has_more_work = 1;
3040 break;
3041 }
3042 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3043 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3044 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3045 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3046 if (likely(budget))
3047 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3048 else
3049 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3050 &event);
3051 if (event & BNXT_REDIRECT_EVENT)
3052 flush_xdp = true;
3053 if (likely(rc >= 0))
3054 rx_pkts += rc;
3055 /* Increment rx_pkts when rc is -ENOMEM to count towards
3056 * the NAPI budget. Otherwise, we may potentially loop
3057 * here forever if we consistently cannot allocate
3058 * buffers.
3059 */
3060 else if (rc == -ENOMEM && budget)
3061 rx_pkts++;
3062 else if (rc == -EBUSY) /* partial completion */
3063 break;
3064 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3065 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3066 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3067 bnxt_hwrm_handler(bp, txcmp);
3068 }
3069 raw_cons = NEXT_RAW_CMP(raw_cons);
3070
3071 if (rx_pkts && rx_pkts == budget) {
3072 cpr->has_more_work = 1;
3073 break;
3074 }
3075 }
3076
3077 if (flush_xdp) {
3078 xdp_do_flush();
3079 event &= ~BNXT_REDIRECT_EVENT;
3080 }
3081
3082 if (event & BNXT_TX_EVENT) {
3083 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3084 u16 prod = txr->tx_prod;
3085
3086 /* Sync BD data before updating doorbell */
3087 wmb();
3088
3089 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3090 event &= ~BNXT_TX_EVENT;
3091 }
3092
3093 cpr->cp_raw_cons = raw_cons;
3094 bnapi->events |= event;
3095 return rx_pkts;
3096 }
3097
3098 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3099 int budget)
3100 {
3101 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3102 bnapi->tx_int(bp, bnapi, budget);
3103
3104 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3105 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3106
3107 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3108 bnapi->events &= ~BNXT_RX_EVENT;
3109 }
3110 if (bnapi->events & BNXT_AGG_EVENT) {
3111 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3112
3113 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3114 bnapi->events &= ~BNXT_AGG_EVENT;
3115 }
3116 }
3117
3118 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3119 int budget)
3120 {
3121 struct bnxt_napi *bnapi = cpr->bnapi;
3122 int rx_pkts;
3123
3124 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3125
3126 /* ACK completion ring before freeing tx ring and producing new
3127 * buffers in rx/agg rings to prevent overflowing the completion
3128 * ring.
3129 */
3130 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3131
3132 __bnxt_poll_work_done(bp, bnapi, budget);
3133 return rx_pkts;
3134 }
3135
3136 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3137 {
3138 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3139 struct bnxt *bp = bnapi->bp;
3140 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3141 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3142 struct tx_cmp *txcmp;
3143 struct rx_cmp_ext *rxcmp1;
3144 u32 cp_cons, tmp_raw_cons;
3145 u32 raw_cons = cpr->cp_raw_cons;
3146 bool flush_xdp = false;
3147 u32 rx_pkts = 0;
3148 u8 event = 0;
3149
3150 while (1) {
3151 int rc;
3152
3153 cp_cons = RING_CMP(raw_cons);
3154 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3155
3156 if (!TX_CMP_VALID(txcmp, raw_cons))
3157 break;
3158
3159 /* The valid test of the entry must be done first before
3160 * reading any further.
3161 */
3162 dma_rmb();
3163 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3164 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3165 cp_cons = RING_CMP(tmp_raw_cons);
3166 rxcmp1 = (struct rx_cmp_ext *)
3167 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3168
3169 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3170 break;
3171
3172 /* force an error to recycle the buffer */
3173 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3174 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3175
3176 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3177 if (likely(rc == -EIO) && budget)
3178 rx_pkts++;
3179 else if (rc == -EBUSY) /* partial completion */
3180 break;
3181 if (event & BNXT_REDIRECT_EVENT)
3182 flush_xdp = true;
3183 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3184 CMPL_BASE_TYPE_HWRM_DONE)) {
3185 bnxt_hwrm_handler(bp, txcmp);
3186 } else {
3187 netdev_err(bp->dev,
3188 "Invalid completion received on special ring\n");
3189 }
3190 raw_cons = NEXT_RAW_CMP(raw_cons);
3191
3192 if (rx_pkts == budget)
3193 break;
3194 }
3195
3196 cpr->cp_raw_cons = raw_cons;
3197 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3198 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3199
3200 if (event & BNXT_AGG_EVENT)
3201 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3202 if (flush_xdp)
3203 xdp_do_flush();
3204
3205 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3206 napi_complete_done(napi, rx_pkts);
3207 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3208 }
3209 return rx_pkts;
3210 }
3211
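/* NAPI poll handler built around bnxt_poll_work(): repeat until the budget
 * is exhausted or no completion work remains, re-arm the completion ring
 * doorbell when done, and feed a DIM sample when adaptive interrupt
 * moderation (BNXT_FLAG_DIM) is enabled.  A fatal firmware condition
 * completes NAPI immediately without polling.
 */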
3212 static int bnxt_poll(struct napi_struct *napi, int budget)
3213 {
3214 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3215 struct bnxt *bp = bnapi->bp;
3216 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3217 int work_done = 0;
3218
3219 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3220 napi_complete(napi);
3221 return 0;
3222 }
3223 while (1) {
3224 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3225
3226 if (work_done >= budget) {
3227 if (!budget)
3228 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3229 break;
3230 }
3231
3232 if (!bnxt_has_work(bp, cpr)) {
3233 if (napi_complete_done(napi, work_done))
3234 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3235 break;
3236 }
3237 }
3238 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3239 struct dim_sample dim_sample = {};
3240
3241 dim_update_sample(cpr->event_ctr,
3242 cpr->rx_packets,
3243 cpr->rx_bytes,
3244 &dim_sample);
3245 net_dim(&cpr->dim, &dim_sample);
3246 }
3247 return work_done;
3248 }
3249
3250 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3251 {
3252 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3253 int i, work_done = 0;
3254
3255 for (i = 0; i < cpr->cp_ring_count; i++) {
3256 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3257
3258 if (cpr2->had_nqe_notify) {
3259 work_done += __bnxt_poll_work(bp, cpr2,
3260 budget - work_done);
3261 cpr->has_more_work |= cpr2->has_more_work;
3262 }
3263 }
3264 return work_done;
3265 }
3266
3267 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3268 u64 dbr_type, int budget)
3269 {
3270 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3271 int i;
3272
3273 for (i = 0; i < cpr->cp_ring_count; i++) {
3274 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3275 struct bnxt_db_info *db;
3276
3277 if (cpr2->had_work_done) {
3278 u32 tgl = 0;
3279
3280 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3281 cpr2->had_nqe_notify = 0;
3282 tgl = cpr2->toggle;
3283 }
3284 db = &cpr2->cp_db;
3285 bnxt_writeq(bp,
3286 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3287 DB_RING_IDX(db, cpr2->cp_raw_cons),
3288 db->doorbell);
3289 cpr2->had_work_done = 0;
3290 }
3291 }
3292 __bnxt_poll_work_done(bp, bnapi, budget);
3293 }
3294
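/* NAPI poll handler for chips that use a notification queue (NQ): drain NQ
 * entries, polling the completion ring referenced by each CQ notification
 * via __bnxt_poll_work(), ring the CQ/NQ doorbells as work completes, and
 * only re-arm everything once no more work is pending.  A DIM sample is fed
 * from the RX completion ring statistics when adaptive moderation is on.
 */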
3295 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3296 {
3297 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3298 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3299 struct bnxt_cp_ring_info *cpr_rx;
3300 u32 raw_cons = cpr->cp_raw_cons;
3301 struct bnxt *bp = bnapi->bp;
3302 struct nqe_cn *nqcmp;
3303 int work_done = 0;
3304 u32 cons;
3305
3306 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3307 napi_complete(napi);
3308 return 0;
3309 }
3310 if (cpr->has_more_work) {
3311 cpr->has_more_work = 0;
3312 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3313 }
3314 while (1) {
3315 u16 type;
3316
3317 cons = RING_CMP(raw_cons);
3318 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3319
3320 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3321 if (cpr->has_more_work)
3322 break;
3323
3324 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3325 budget);
3326 cpr->cp_raw_cons = raw_cons;
3327 if (napi_complete_done(napi, work_done))
3328 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3329 cpr->cp_raw_cons);
3330 goto poll_done;
3331 }
3332
3333 /* The valid test of the entry must be done first before
3334 * reading any further.
3335 */
3336 dma_rmb();
3337
3338 type = le16_to_cpu(nqcmp->type);
3339 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3340 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3341 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3342 struct bnxt_cp_ring_info *cpr2;
3343
3344 /* No more budget for RX work */
3345 if (budget && work_done >= budget &&
3346 cq_type == BNXT_NQ_HDL_TYPE_RX)
3347 break;
3348
3349 idx = BNXT_NQ_HDL_IDX(idx);
3350 cpr2 = &cpr->cp_ring_arr[idx];
3351 cpr2->had_nqe_notify = 1;
3352 cpr2->toggle = NQE_CN_TOGGLE(type);
3353 work_done += __bnxt_poll_work(bp, cpr2,
3354 budget - work_done);
3355 cpr->has_more_work |= cpr2->has_more_work;
3356 } else {
3357 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3358 }
3359 raw_cons = NEXT_RAW_CMP(raw_cons);
3360 }
3361 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3362 if (raw_cons != cpr->cp_raw_cons) {
3363 cpr->cp_raw_cons = raw_cons;
3364 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3365 }
3366 poll_done:
3367 cpr_rx = &cpr->cp_ring_arr[0];
3368 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3369 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3370 struct dim_sample dim_sample = {};
3371
3372 dim_update_sample(cpr->event_ctr,
3373 cpr_rx->rx_packets,
3374 cpr_rx->rx_bytes,
3375 &dim_sample);
3376 net_dim(&cpr->dim, &dim_sample);
3377 }
3378 return work_done;
3379 }
3380
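/* Unmap and free every pending TX buffer (XDP_REDIRECT frames, push and
 * regular SKBs) on one TX ring, then reset the corresponding netdev TX queue.
 */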
3381 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3382 struct bnxt_tx_ring_info *txr, int idx)
3383 {
3384 int i, max_idx;
3385 struct pci_dev *pdev = bp->pdev;
3386
3387 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3388
3389 for (i = 0; i < max_idx;) {
3390 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3391 struct sk_buff *skb;
3392 int j, last;
3393
3394 if (idx < bp->tx_nr_rings_xdp &&
3395 tx_buf->action == XDP_REDIRECT) {
3396 dma_unmap_single(&pdev->dev,
3397 dma_unmap_addr(tx_buf, mapping),
3398 dma_unmap_len(tx_buf, len),
3399 DMA_TO_DEVICE);
3400 xdp_return_frame(tx_buf->xdpf);
3401 tx_buf->action = 0;
3402 tx_buf->xdpf = NULL;
3403 i++;
3404 continue;
3405 }
3406
3407 skb = tx_buf->skb;
3408 if (!skb) {
3409 i++;
3410 continue;
3411 }
3412
3413 tx_buf->skb = NULL;
3414
3415 if (tx_buf->is_push) {
3416 dev_kfree_skb(skb);
3417 i += 2;
3418 continue;
3419 }
3420
3421 dma_unmap_single(&pdev->dev,
3422 dma_unmap_addr(tx_buf, mapping),
3423 skb_headlen(skb),
3424 DMA_TO_DEVICE);
3425
3426 last = tx_buf->nr_frags;
3427 i += 2;
3428 for (j = 0; j < last; j++, i++) {
3429 int ring_idx = i & bp->tx_ring_mask;
3430 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3431
3432 tx_buf = &txr->tx_buf_ring[ring_idx];
3433 netmem_dma_unmap_page_attrs(&pdev->dev,
3434 dma_unmap_addr(tx_buf,
3435 mapping),
3436 skb_frag_size(frag),
3437 DMA_TO_DEVICE, 0);
3438 }
3439 dev_kfree_skb(skb);
3440 }
3441 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3442 }
3443
3444 static void bnxt_free_tx_skbs(struct bnxt *bp)
3445 {
3446 int i;
3447
3448 if (!bp->tx_ring)
3449 return;
3450
3451 for (i = 0; i < bp->tx_nr_rings; i++) {
3452 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3453
3454 if (!txr->tx_buf_ring)
3455 continue;
3456
3457 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3458 }
3459
3460 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3461 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3462 }
3463
3464 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3465 {
3466 int i, max_idx;
3467
3468 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3469
3470 for (i = 0; i < max_idx; i++) {
3471 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3472 void *data = rx_buf->data;
3473
3474 if (!data)
3475 continue;
3476
3477 rx_buf->data = NULL;
3478 if (BNXT_RX_PAGE_MODE(bp))
3479 page_pool_recycle_direct(rxr->page_pool, data);
3480 else
3481 page_pool_free_va(rxr->head_pool, data, true);
3482 }
3483 }
3484
3485 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3486 {
3487 int i, max_idx;
3488
3489 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3490
3491 for (i = 0; i < max_idx; i++) {
3492 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3493 netmem_ref netmem = rx_agg_buf->netmem;
3494
3495 if (!netmem)
3496 continue;
3497
3498 rx_agg_buf->netmem = 0;
3499 __clear_bit(i, rxr->rx_agg_bmap);
3500
3501 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3502 }
3503 }
3504
3505 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3506 struct bnxt_rx_ring_info *rxr)
3507 {
3508 int i;
3509
3510 for (i = 0; i < bp->max_tpa; i++) {
3511 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3512 u8 *data = tpa_info->data;
3513
3514 if (!data)
3515 continue;
3516
3517 tpa_info->data = NULL;
3518 page_pool_free_va(rxr->head_pool, data, false);
3519 }
3520 }
3521
3522 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3523 struct bnxt_rx_ring_info *rxr)
3524 {
3525 struct bnxt_tpa_idx_map *map;
3526
3527 if (!rxr->rx_tpa)
3528 goto skip_rx_tpa_free;
3529
3530 bnxt_free_one_tpa_info_data(bp, rxr);
3531
3532 skip_rx_tpa_free:
3533 if (!rxr->rx_buf_ring)
3534 goto skip_rx_buf_free;
3535
3536 bnxt_free_one_rx_ring(bp, rxr);
3537
3538 skip_rx_buf_free:
3539 if (!rxr->rx_agg_ring)
3540 goto skip_rx_agg_free;
3541
3542 bnxt_free_one_rx_agg_ring(bp, rxr);
3543
3544 skip_rx_agg_free:
3545 map = rxr->rx_tpa_idx_map;
3546 if (map)
3547 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3548 }
3549
3550 static void bnxt_free_rx_skbs(struct bnxt *bp)
3551 {
3552 int i;
3553
3554 if (!bp->rx_ring)
3555 return;
3556
3557 for (i = 0; i < bp->rx_nr_rings; i++)
3558 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3559 }
3560
3561 static void bnxt_free_skbs(struct bnxt *bp)
3562 {
3563 bnxt_free_tx_skbs(bp);
3564 bnxt_free_rx_skbs(bp);
3565 }
3566
3567 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3568 {
3569 u8 init_val = ctxm->init_value;
3570 u16 offset = ctxm->init_offset;
3571 u8 *p2 = p;
3572 int i;
3573
3574 if (!init_val)
3575 return;
3576 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3577 memset(p, init_val, len);
3578 return;
3579 }
3580 for (i = 0; i < len; i += ctxm->entry_size)
3581 *(p2 + i + offset) = init_val;
3582 }
3583
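/* Copy up to (tail - head) bytes of context memory from the paged ring
 * into @buf at @offset, clamped to the end of the page array.  Returns
 * the number of bytes accounted for (copied only when @buf is non-NULL).
 */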
3584 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3585 void *buf, size_t offset, size_t head,
3586 size_t tail)
3587 {
3588 int i, head_page, start_idx, source_offset;
3589 size_t len, rem_len, total_len, max_bytes;
3590
3591 head_page = head / rmem->page_size;
3592 source_offset = head % rmem->page_size;
3593 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3594 if (!total_len)
3595 total_len = MAX_CTX_BYTES;
3596 start_idx = head_page % MAX_CTX_PAGES;
3597 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3598 source_offset;
3599 total_len = min(total_len, max_bytes);
3600 rem_len = total_len;
3601
3602 for (i = start_idx; rem_len; i++, source_offset = 0) {
3603 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3604 if (buf)
3605 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3606 len);
3607 offset += len;
3608 rem_len -= len;
3609 }
3610 return total_len;
3611 }
3612
3613 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3614 {
3615 struct pci_dev *pdev = bp->pdev;
3616 int i;
3617
3618 if (!rmem->pg_arr)
3619 goto skip_pages;
3620
3621 for (i = 0; i < rmem->nr_pages; i++) {
3622 if (!rmem->pg_arr[i])
3623 continue;
3624
3625 dma_free_coherent(&pdev->dev, rmem->page_size,
3626 rmem->pg_arr[i], rmem->dma_arr[i]);
3627
3628 rmem->pg_arr[i] = NULL;
3629 }
3630 skip_pages:
3631 if (rmem->pg_tbl) {
3632 size_t pg_tbl_size = rmem->nr_pages * 8;
3633
3634 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3635 pg_tbl_size = rmem->page_size;
3636 dma_free_coherent(&pdev->dev, pg_tbl_size,
3637 rmem->pg_tbl, rmem->pg_tbl_map);
3638 rmem->pg_tbl = NULL;
3639 }
3640 if (rmem->vmem_size && *rmem->vmem) {
3641 vfree(*rmem->vmem);
3642 *rmem->vmem = NULL;
3643 }
3644 }
3645
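/* Allocate the DMA-coherent pages described by @rmem, plus an indirection
 * page table when more than one page (or an extra level) is needed, fill in
 * the PTEs with the VALID/NEXT_TO_LAST/LAST bits, initialize context memory,
 * and allocate the optional software vmem area.
 */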
3646 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3647 {
3648 struct pci_dev *pdev = bp->pdev;
3649 u64 valid_bit = 0;
3650 int i;
3651
3652 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3653 valid_bit = PTU_PTE_VALID;
3654 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3655 size_t pg_tbl_size = rmem->nr_pages * 8;
3656
3657 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3658 pg_tbl_size = rmem->page_size;
3659 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3660 &rmem->pg_tbl_map,
3661 GFP_KERNEL);
3662 if (!rmem->pg_tbl)
3663 return -ENOMEM;
3664 }
3665
3666 for (i = 0; i < rmem->nr_pages; i++) {
3667 u64 extra_bits = valid_bit;
3668
3669 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3670 rmem->page_size,
3671 &rmem->dma_arr[i],
3672 GFP_KERNEL);
3673 if (!rmem->pg_arr[i])
3674 return -ENOMEM;
3675
3676 if (rmem->ctx_mem)
3677 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3678 rmem->page_size);
3679 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3680 if (i == rmem->nr_pages - 2 &&
3681 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3682 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3683 else if (i == rmem->nr_pages - 1 &&
3684 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3685 extra_bits |= PTU_PTE_LAST;
3686 rmem->pg_tbl[i] =
3687 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3688 }
3689 }
3690
3691 if (rmem->vmem_size) {
3692 *rmem->vmem = vzalloc(rmem->vmem_size);
3693 if (!(*rmem->vmem))
3694 return -ENOMEM;
3695 }
3696 return 0;
3697 }
3698
3699 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3700 struct bnxt_rx_ring_info *rxr)
3701 {
3702 int i;
3703
3704 kfree(rxr->rx_tpa_idx_map);
3705 rxr->rx_tpa_idx_map = NULL;
3706 if (rxr->rx_tpa) {
3707 for (i = 0; i < bp->max_tpa; i++) {
3708 kfree(rxr->rx_tpa[i].agg_arr);
3709 rxr->rx_tpa[i].agg_arr = NULL;
3710 }
3711 }
3712 kfree(rxr->rx_tpa);
3713 rxr->rx_tpa = NULL;
3714 }
3715
3716 static void bnxt_free_tpa_info(struct bnxt *bp)
3717 {
3718 int i;
3719
3720 for (i = 0; i < bp->rx_nr_rings; i++) {
3721 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3722
3723 bnxt_free_one_tpa_info(bp, rxr);
3724 }
3725 }
3726
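/* Allocate the per-ring TPA info array; on P5_PLUS chips also allocate the
 * per-TPA aggregation completion arrays and the agg index map.
 */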
3727 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3728 struct bnxt_rx_ring_info *rxr)
3729 {
3730 struct rx_agg_cmp *agg;
3731 int i;
3732
3733 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3734 GFP_KERNEL);
3735 if (!rxr->rx_tpa)
3736 return -ENOMEM;
3737
3738 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3739 return 0;
3740 for (i = 0; i < bp->max_tpa; i++) {
3741 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3742 if (!agg)
3743 return -ENOMEM;
3744 rxr->rx_tpa[i].agg_arr = agg;
3745 }
3746 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3747 GFP_KERNEL);
3748 if (!rxr->rx_tpa_idx_map)
3749 return -ENOMEM;
3750
3751 return 0;
3752 }
3753
3754 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3755 {
3756 int i, rc;
3757
3758 bp->max_tpa = MAX_TPA;
3759 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3760 if (!bp->max_tpa_v2)
3761 return 0;
3762 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3763 }
3764
3765 for (i = 0; i < bp->rx_nr_rings; i++) {
3766 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3767
3768 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3769 if (rc)
3770 return rc;
3771 }
3772 return 0;
3773 }
3774
3775 static void bnxt_free_rx_rings(struct bnxt *bp)
3776 {
3777 int i;
3778
3779 if (!bp->rx_ring)
3780 return;
3781
3782 bnxt_free_tpa_info(bp);
3783 for (i = 0; i < bp->rx_nr_rings; i++) {
3784 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3785 struct bnxt_ring_struct *ring;
3786
3787 if (rxr->xdp_prog)
3788 bpf_prog_put(rxr->xdp_prog);
3789
3790 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3791 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3792
3793 page_pool_destroy(rxr->page_pool);
3794 if (bnxt_separate_head_pool(rxr))
3795 page_pool_destroy(rxr->head_pool);
3796 rxr->page_pool = rxr->head_pool = NULL;
3797
3798 kfree(rxr->rx_agg_bmap);
3799 rxr->rx_agg_bmap = NULL;
3800
3801 ring = &rxr->rx_ring_struct;
3802 bnxt_free_ring(bp, &ring->ring_mem);
3803
3804 ring = &rxr->rx_agg_ring_struct;
3805 bnxt_free_ring(bp, &ring->ring_mem);
3806 }
3807 }
3808
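/* Create the page pool(s) for one RX ring: a main pool for packet and agg
 * buffers and, when a separate head pool is required (e.g. the main pool
 * hands out unreadable netmem), a readable head pool for packet headers.
 */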
3809 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3810 struct bnxt_rx_ring_info *rxr,
3811 int numa_node)
3812 {
3813 const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
3814 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3815 struct page_pool_params pp = { 0 };
3816 struct page_pool *pool;
3817
3818 pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
3819 if (BNXT_RX_PAGE_MODE(bp))
3820 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3821 pp.nid = numa_node;
3822 pp.napi = &rxr->bnapi->napi;
3823 pp.netdev = bp->dev;
3824 pp.dev = &bp->pdev->dev;
3825 pp.dma_dir = bp->rx_dir;
3826 pp.max_len = PAGE_SIZE;
3827 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3828 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3829 pp.queue_idx = rxr->bnapi->index;
3830
3831 pool = page_pool_create(&pp);
3832 if (IS_ERR(pool))
3833 return PTR_ERR(pool);
3834 rxr->page_pool = pool;
3835
3836 rxr->need_head_pool = page_pool_is_unreadable(pool);
3837 if (bnxt_separate_head_pool(rxr)) {
3838 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3839 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3840 pool = page_pool_create(&pp);
3841 if (IS_ERR(pool))
3842 goto err_destroy_pp;
3843 }
3844 rxr->head_pool = pool;
3845
3846 return 0;
3847
3848 err_destroy_pp:
3849 page_pool_destroy(rxr->page_pool);
3850 rxr->page_pool = NULL;
3851 return PTR_ERR(pool);
3852 }
3853
3854 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3855 {
3856 u16 mem_size;
3857
3858 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3859 mem_size = rxr->rx_agg_bmap_size / 8;
3860 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3861 if (!rxr->rx_agg_bmap)
3862 return -ENOMEM;
3863
3864 return 0;
3865 }
3866
3867 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3868 {
3869 int numa_node = dev_to_node(&bp->pdev->dev);
3870 int i, rc = 0, agg_rings = 0, cpu;
3871
3872 if (!bp->rx_ring)
3873 return -ENOMEM;
3874
3875 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3876 agg_rings = 1;
3877
3878 for (i = 0; i < bp->rx_nr_rings; i++) {
3879 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3880 struct bnxt_ring_struct *ring;
3881 int cpu_node;
3882
3883 ring = &rxr->rx_ring_struct;
3884
3885 cpu = cpumask_local_spread(i, numa_node);
3886 cpu_node = cpu_to_node(cpu);
3887 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3888 i, cpu_node);
3889 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3890 if (rc)
3891 return rc;
3892
3893 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3894 if (rc < 0)
3895 return rc;
3896
3897 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3898 MEM_TYPE_PAGE_POOL,
3899 rxr->page_pool);
3900 if (rc) {
3901 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3902 return rc;
3903 }
3904
3905 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3906 if (rc)
3907 return rc;
3908
3909 ring->grp_idx = i;
3910 if (agg_rings) {
3911 ring = &rxr->rx_agg_ring_struct;
3912 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3913 if (rc)
3914 return rc;
3915
3916 ring->grp_idx = i;
3917 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3918 if (rc)
3919 return rc;
3920 }
3921 }
3922 if (bp->flags & BNXT_FLAG_TPA)
3923 rc = bnxt_alloc_tpa_info(bp);
3924 return rc;
3925 }
3926
3927 static void bnxt_free_tx_rings(struct bnxt *bp)
3928 {
3929 int i;
3930 struct pci_dev *pdev = bp->pdev;
3931
3932 if (!bp->tx_ring)
3933 return;
3934
3935 for (i = 0; i < bp->tx_nr_rings; i++) {
3936 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3937 struct bnxt_ring_struct *ring;
3938
3939 if (txr->tx_push) {
3940 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3941 txr->tx_push, txr->tx_push_mapping);
3942 txr->tx_push = NULL;
3943 }
3944
3945 ring = &txr->tx_ring_struct;
3946
3947 bnxt_free_ring(bp, &ring->ring_mem);
3948 }
3949 }
3950
3951 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3952 ((tc) * (bp)->tx_nr_rings_per_tc)
3953
3954 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3955 ((tx) % (bp)->tx_nr_rings_per_tc)
3956
3957 #define BNXT_RING_TO_TC(bp, tx) \
3958 ((tx) / (bp)->tx_nr_rings_per_tc)
3959
3960 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3961 {
3962 int i, j, rc;
3963 struct pci_dev *pdev = bp->pdev;
3964
3965 bp->tx_push_size = 0;
3966 if (bp->tx_push_thresh) {
3967 int push_size;
3968
3969 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3970 bp->tx_push_thresh);
3971
3972 if (push_size > 256) {
3973 push_size = 0;
3974 bp->tx_push_thresh = 0;
3975 }
3976
3977 bp->tx_push_size = push_size;
3978 }
3979
3980 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3981 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3982 struct bnxt_ring_struct *ring;
3983 u8 qidx;
3984
3985 ring = &txr->tx_ring_struct;
3986
3987 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3988 if (rc)
3989 return rc;
3990
3991 ring->grp_idx = txr->bnapi->index;
3992 if (bp->tx_push_size) {
3993 dma_addr_t mapping;
3994
3995 /* One pre-allocated DMA buffer to back up
3996 * TX push operations
3997 */
3998 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3999 bp->tx_push_size,
4000 &txr->tx_push_mapping,
4001 GFP_KERNEL);
4002
4003 if (!txr->tx_push)
4004 return -ENOMEM;
4005
4006 mapping = txr->tx_push_mapping +
4007 sizeof(struct tx_push_bd);
4008 txr->data_mapping = cpu_to_le64(mapping);
4009 }
4010 qidx = bp->tc_to_qidx[j];
4011 ring->queue_id = bp->q_info[qidx].queue_id;
4012 spin_lock_init(&txr->xdp_tx_lock);
4013 if (i < bp->tx_nr_rings_xdp)
4014 continue;
4015 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4016 j++;
4017 }
4018 return 0;
4019 }
4020
4021 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4022 {
4023 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4024
4025 kfree(cpr->cp_desc_ring);
4026 cpr->cp_desc_ring = NULL;
4027 ring->ring_mem.pg_arr = NULL;
4028 kfree(cpr->cp_desc_mapping);
4029 cpr->cp_desc_mapping = NULL;
4030 ring->ring_mem.dma_arr = NULL;
4031 }
4032
4033 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4034 {
4035 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
4036 if (!cpr->cp_desc_ring)
4037 return -ENOMEM;
4038 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
4039 GFP_KERNEL);
4040 if (!cpr->cp_desc_mapping)
4041 return -ENOMEM;
4042 return 0;
4043 }
4044
4045 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4046 {
4047 int i;
4048
4049 if (!bp->bnapi)
4050 return;
4051 for (i = 0; i < bp->cp_nr_rings; i++) {
4052 struct bnxt_napi *bnapi = bp->bnapi[i];
4053
4054 if (!bnapi)
4055 continue;
4056 bnxt_free_cp_arrays(&bnapi->cp_ring);
4057 }
4058 }
4059
4060 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4061 {
4062 int i, n = bp->cp_nr_pages;
4063
4064 for (i = 0; i < bp->cp_nr_rings; i++) {
4065 struct bnxt_napi *bnapi = bp->bnapi[i];
4066 int rc;
4067
4068 if (!bnapi)
4069 continue;
4070 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4071 if (rc)
4072 return rc;
4073 }
4074 return 0;
4075 }
4076
4077 static void bnxt_free_cp_rings(struct bnxt *bp)
4078 {
4079 int i;
4080
4081 if (!bp->bnapi)
4082 return;
4083
4084 for (i = 0; i < bp->cp_nr_rings; i++) {
4085 struct bnxt_napi *bnapi = bp->bnapi[i];
4086 struct bnxt_cp_ring_info *cpr;
4087 struct bnxt_ring_struct *ring;
4088 int j;
4089
4090 if (!bnapi)
4091 continue;
4092
4093 cpr = &bnapi->cp_ring;
4094 ring = &cpr->cp_ring_struct;
4095
4096 bnxt_free_ring(bp, &ring->ring_mem);
4097
4098 if (!cpr->cp_ring_arr)
4099 continue;
4100
4101 for (j = 0; j < cpr->cp_ring_count; j++) {
4102 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4103
4104 ring = &cpr2->cp_ring_struct;
4105 bnxt_free_ring(bp, &ring->ring_mem);
4106 bnxt_free_cp_arrays(cpr2);
4107 }
4108 kfree(cpr->cp_ring_arr);
4109 cpr->cp_ring_arr = NULL;
4110 cpr->cp_ring_count = 0;
4111 }
4112 }
4113
4114 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4115 struct bnxt_cp_ring_info *cpr)
4116 {
4117 struct bnxt_ring_mem_info *rmem;
4118 struct bnxt_ring_struct *ring;
4119 int rc;
4120
4121 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4122 if (rc) {
4123 bnxt_free_cp_arrays(cpr);
4124 return -ENOMEM;
4125 }
4126 ring = &cpr->cp_ring_struct;
4127 rmem = &ring->ring_mem;
4128 rmem->nr_pages = bp->cp_nr_pages;
4129 rmem->page_size = HW_CMPD_RING_SIZE;
4130 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4131 rmem->dma_arr = cpr->cp_desc_mapping;
4132 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4133 rc = bnxt_alloc_ring(bp, rmem);
4134 if (rc) {
4135 bnxt_free_ring(bp, rmem);
4136 bnxt_free_cp_arrays(cpr);
4137 }
4138 return rc;
4139 }
4140
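/* Allocate the per-NAPI NQ ring.  On P5_PLUS chips also allocate an array of
 * child completion rings for the RX and TX rings served by this NAPI and
 * link them back to those rings.
 */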
4141 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4142 {
4143 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4144 int i, j, rc, ulp_msix;
4145 int tcs = bp->num_tc;
4146
4147 if (!tcs)
4148 tcs = 1;
4149 ulp_msix = bnxt_get_ulp_msix_num(bp);
4150 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4151 struct bnxt_napi *bnapi = bp->bnapi[i];
4152 struct bnxt_cp_ring_info *cpr, *cpr2;
4153 struct bnxt_ring_struct *ring;
4154 int cp_count = 0, k;
4155 int rx = 0, tx = 0;
4156
4157 if (!bnapi)
4158 continue;
4159
4160 cpr = &bnapi->cp_ring;
4161 cpr->bnapi = bnapi;
4162 ring = &cpr->cp_ring_struct;
4163
4164 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4165 if (rc)
4166 return rc;
4167
4168 ring->map_idx = ulp_msix + i;
4169
4170 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4171 continue;
4172
4173 if (i < bp->rx_nr_rings) {
4174 cp_count++;
4175 rx = 1;
4176 }
4177 if (i < bp->tx_nr_rings_xdp) {
4178 cp_count++;
4179 tx = 1;
4180 } else if ((sh && i < bp->tx_nr_rings) ||
4181 (!sh && i >= bp->rx_nr_rings)) {
4182 cp_count += tcs;
4183 tx = 1;
4184 }
4185
4186 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4187 GFP_KERNEL);
4188 if (!cpr->cp_ring_arr)
4189 return -ENOMEM;
4190 cpr->cp_ring_count = cp_count;
4191
4192 for (k = 0; k < cp_count; k++) {
4193 cpr2 = &cpr->cp_ring_arr[k];
4194 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4195 if (rc)
4196 return rc;
4197 cpr2->bnapi = bnapi;
4198 cpr2->sw_stats = cpr->sw_stats;
4199 cpr2->cp_idx = k;
4200 if (!k && rx) {
4201 bp->rx_ring[i].rx_cpr = cpr2;
4202 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4203 } else {
4204 int n, tc = k - rx;
4205
4206 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4207 bp->tx_ring[n].tx_cpr = cpr2;
4208 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4209 }
4210 }
4211 if (tx)
4212 j++;
4213 }
4214 return 0;
4215 }
4216
4217 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4218 struct bnxt_rx_ring_info *rxr)
4219 {
4220 struct bnxt_ring_mem_info *rmem;
4221 struct bnxt_ring_struct *ring;
4222
4223 ring = &rxr->rx_ring_struct;
4224 rmem = &ring->ring_mem;
4225 rmem->nr_pages = bp->rx_nr_pages;
4226 rmem->page_size = HW_RXBD_RING_SIZE;
4227 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4228 rmem->dma_arr = rxr->rx_desc_mapping;
4229 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4230 rmem->vmem = (void **)&rxr->rx_buf_ring;
4231
4232 ring = &rxr->rx_agg_ring_struct;
4233 rmem = &ring->ring_mem;
4234 rmem->nr_pages = bp->rx_agg_nr_pages;
4235 rmem->page_size = HW_RXBD_RING_SIZE;
4236 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4237 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4238 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4239 rmem->vmem = (void **)&rxr->rx_agg_ring;
4240 }
4241
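/* Clear all software pointers (page pools, XDP rxq info, ring page arrays
 * and vmem) in the RX ring struct so that it can be re-initialized.
 */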
4242 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4243 struct bnxt_rx_ring_info *rxr)
4244 {
4245 struct bnxt_ring_mem_info *rmem;
4246 struct bnxt_ring_struct *ring;
4247 int i;
4248
4249 rxr->page_pool->p.napi = NULL;
4250 rxr->page_pool = NULL;
4251 rxr->head_pool->p.napi = NULL;
4252 rxr->head_pool = NULL;
4253 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4254
4255 ring = &rxr->rx_ring_struct;
4256 rmem = &ring->ring_mem;
4257 rmem->pg_tbl = NULL;
4258 rmem->pg_tbl_map = 0;
4259 for (i = 0; i < rmem->nr_pages; i++) {
4260 rmem->pg_arr[i] = NULL;
4261 rmem->dma_arr[i] = 0;
4262 }
4263 *rmem->vmem = NULL;
4264
4265 ring = &rxr->rx_agg_ring_struct;
4266 rmem = &ring->ring_mem;
4267 rmem->pg_tbl = NULL;
4268 rmem->pg_tbl_map = 0;
4269 for (i = 0; i < rmem->nr_pages; i++) {
4270 rmem->pg_arr[i] = NULL;
4271 rmem->dma_arr[i] = 0;
4272 }
4273 *rmem->vmem = NULL;
4274 }
4275
4276 static void bnxt_init_ring_struct(struct bnxt *bp)
4277 {
4278 int i, j;
4279
4280 for (i = 0; i < bp->cp_nr_rings; i++) {
4281 struct bnxt_napi *bnapi = bp->bnapi[i];
4282 struct bnxt_ring_mem_info *rmem;
4283 struct bnxt_cp_ring_info *cpr;
4284 struct bnxt_rx_ring_info *rxr;
4285 struct bnxt_tx_ring_info *txr;
4286 struct bnxt_ring_struct *ring;
4287
4288 if (!bnapi)
4289 continue;
4290
4291 cpr = &bnapi->cp_ring;
4292 ring = &cpr->cp_ring_struct;
4293 rmem = &ring->ring_mem;
4294 rmem->nr_pages = bp->cp_nr_pages;
4295 rmem->page_size = HW_CMPD_RING_SIZE;
4296 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4297 rmem->dma_arr = cpr->cp_desc_mapping;
4298 rmem->vmem_size = 0;
4299
4300 rxr = bnapi->rx_ring;
4301 if (!rxr)
4302 goto skip_rx;
4303
4304 ring = &rxr->rx_ring_struct;
4305 rmem = &ring->ring_mem;
4306 rmem->nr_pages = bp->rx_nr_pages;
4307 rmem->page_size = HW_RXBD_RING_SIZE;
4308 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4309 rmem->dma_arr = rxr->rx_desc_mapping;
4310 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4311 rmem->vmem = (void **)&rxr->rx_buf_ring;
4312
4313 ring = &rxr->rx_agg_ring_struct;
4314 rmem = &ring->ring_mem;
4315 rmem->nr_pages = bp->rx_agg_nr_pages;
4316 rmem->page_size = HW_RXBD_RING_SIZE;
4317 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4318 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4319 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4320 rmem->vmem = (void **)&rxr->rx_agg_ring;
4321
4322 skip_rx:
4323 bnxt_for_each_napi_tx(j, bnapi, txr) {
4324 ring = &txr->tx_ring_struct;
4325 rmem = &ring->ring_mem;
4326 rmem->nr_pages = bp->tx_nr_pages;
4327 rmem->page_size = HW_TXBD_RING_SIZE;
4328 rmem->pg_arr = (void **)txr->tx_desc_ring;
4329 rmem->dma_arr = txr->tx_desc_mapping;
4330 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4331 rmem->vmem = (void **)&txr->tx_buf_ring;
4332 }
4333 }
4334 }
4335
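/* Write the BD type/flags word and an opaque producer index into every RX
 * BD on every page of the ring.
 */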
4336 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4337 {
4338 int i;
4339 u32 prod;
4340 struct rx_bd **rx_buf_ring;
4341
4342 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4343 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4344 int j;
4345 struct rx_bd *rxbd;
4346
4347 rxbd = rx_buf_ring[i];
4348 if (!rxbd)
4349 continue;
4350
4351 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4352 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4353 rxbd->rx_bd_opaque = prod;
4354 }
4355 }
4356 }
4357
4358 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4359 struct bnxt_rx_ring_info *rxr,
4360 int ring_nr)
4361 {
4362 u32 prod;
4363 int i;
4364
4365 prod = rxr->rx_prod;
4366 for (i = 0; i < bp->rx_ring_size; i++) {
4367 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4368 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4369 ring_nr, i, bp->rx_ring_size);
4370 break;
4371 }
4372 prod = NEXT_RX(prod);
4373 }
4374 rxr->rx_prod = prod;
4375 }
4376
4377 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4378 struct bnxt_rx_ring_info *rxr,
4379 int ring_nr)
4380 {
4381 u32 prod;
4382 int i;
4383
4384 prod = rxr->rx_agg_prod;
4385 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4386 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4387 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4388 ring_nr, i, bp->rx_agg_ring_size);
4389 break;
4390 }
4391 prod = NEXT_RX_AGG(prod);
4392 }
4393 rxr->rx_agg_prod = prod;
4394 }
4395
4396 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4397 struct bnxt_rx_ring_info *rxr)
4398 {
4399 dma_addr_t mapping;
4400 u8 *data;
4401 int i;
4402
4403 for (i = 0; i < bp->max_tpa; i++) {
4404 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4405 GFP_KERNEL);
4406 if (!data)
4407 return -ENOMEM;
4408
4409 rxr->rx_tpa[i].data = data;
4410 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4411 rxr->rx_tpa[i].mapping = mapping;
4412 }
4413
4414 return 0;
4415 }
4416
4417 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4418 {
4419 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4420 int rc;
4421
4422 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4423
4424 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4425 return 0;
4426
4427 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4428
4429 if (rxr->rx_tpa) {
4430 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4431 if (rc)
4432 return rc;
4433 }
4434 return 0;
4435 }
4436
4437 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4438 struct bnxt_rx_ring_info *rxr)
4439 {
4440 struct bnxt_ring_struct *ring;
4441 u32 type;
4442
4443 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4444 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4445
4446 if (NET_IP_ALIGN == 2)
4447 type |= RX_BD_FLAGS_SOP;
4448
4449 ring = &rxr->rx_ring_struct;
4450 bnxt_init_rxbd_pages(ring, type);
4451 ring->fw_ring_id = INVALID_HW_RING_ID;
4452 }
4453
4454 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4455 struct bnxt_rx_ring_info *rxr)
4456 {
4457 struct bnxt_ring_struct *ring;
4458 u32 type;
4459
4460 ring = &rxr->rx_agg_ring_struct;
4461 ring->fw_ring_id = INVALID_HW_RING_ID;
4462 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4463 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4464 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4465
4466 bnxt_init_rxbd_pages(ring, type);
4467 }
4468 }
4469
4470 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4471 {
4472 struct bnxt_rx_ring_info *rxr;
4473
4474 rxr = &bp->rx_ring[ring_nr];
4475 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4476
4477 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4478 &rxr->bnapi->napi);
4479
4480 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4481 bpf_prog_add(bp->xdp_prog, 1);
4482 rxr->xdp_prog = bp->xdp_prog;
4483 }
4484
4485 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4486
4487 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4488 }
4489
4490 static void bnxt_init_cp_rings(struct bnxt *bp)
4491 {
4492 int i, j;
4493
4494 for (i = 0; i < bp->cp_nr_rings; i++) {
4495 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4496 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4497
4498 ring->fw_ring_id = INVALID_HW_RING_ID;
4499 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4500 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4501 if (!cpr->cp_ring_arr)
4502 continue;
4503 for (j = 0; j < cpr->cp_ring_count; j++) {
4504 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4505
4506 ring = &cpr2->cp_ring_struct;
4507 ring->fw_ring_id = INVALID_HW_RING_ID;
4508 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4509 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4510 }
4511 }
4512 }
4513
4514 static int bnxt_init_rx_rings(struct bnxt *bp)
4515 {
4516 int i, rc = 0;
4517
4518 if (BNXT_RX_PAGE_MODE(bp)) {
4519 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4520 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4521 } else {
4522 bp->rx_offset = BNXT_RX_OFFSET;
4523 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4524 }
4525
4526 for (i = 0; i < bp->rx_nr_rings; i++) {
4527 rc = bnxt_init_one_rx_ring(bp, i);
4528 if (rc)
4529 break;
4530 }
4531
4532 return rc;
4533 }
4534
4535 static int bnxt_init_tx_rings(struct bnxt *bp)
4536 {
4537 u16 i;
4538
4539 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4540 BNXT_MIN_TX_DESC_CNT);
4541
4542 for (i = 0; i < bp->tx_nr_rings; i++) {
4543 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4544 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4545
4546 ring->fw_ring_id = INVALID_HW_RING_ID;
4547
4548 if (i >= bp->tx_nr_rings_xdp)
4549 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4550 NETDEV_QUEUE_TYPE_TX,
4551 &txr->bnapi->napi);
4552 }
4553
4554 return 0;
4555 }
4556
4557 static void bnxt_free_ring_grps(struct bnxt *bp)
4558 {
4559 kfree(bp->grp_info);
4560 bp->grp_info = NULL;
4561 }
4562
4563 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4564 {
4565 int i;
4566
4567 if (irq_re_init) {
4568 bp->grp_info = kcalloc(bp->cp_nr_rings,
4569 sizeof(struct bnxt_ring_grp_info),
4570 GFP_KERNEL);
4571 if (!bp->grp_info)
4572 return -ENOMEM;
4573 }
4574 for (i = 0; i < bp->cp_nr_rings; i++) {
4575 if (irq_re_init)
4576 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4577 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4578 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4579 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4580 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4581 }
4582 return 0;
4583 }
4584
4585 static void bnxt_free_vnics(struct bnxt *bp)
4586 {
4587 kfree(bp->vnic_info);
4588 bp->vnic_info = NULL;
4589 bp->nr_vnics = 0;
4590 }
4591
4592 static int bnxt_alloc_vnics(struct bnxt *bp)
4593 {
4594 int num_vnics = 1;
4595
4596 #ifdef CONFIG_RFS_ACCEL
4597 if (bp->flags & BNXT_FLAG_RFS) {
4598 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4599 num_vnics++;
4600 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4601 num_vnics += bp->rx_nr_rings;
4602 }
4603 #endif
4604
4605 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4606 num_vnics++;
4607
4608 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4609 GFP_KERNEL);
4610 if (!bp->vnic_info)
4611 return -ENOMEM;
4612
4613 bp->nr_vnics = num_vnics;
4614 return 0;
4615 }
4616
4617 static void bnxt_init_vnics(struct bnxt *bp)
4618 {
4619 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4620 int i;
4621
4622 for (i = 0; i < bp->nr_vnics; i++) {
4623 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4624 int j;
4625
4626 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4627 vnic->vnic_id = i;
4628 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4629 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4630
4631 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4632
4633 if (bp->vnic_info[i].rss_hash_key) {
4634 if (i == BNXT_VNIC_DEFAULT) {
4635 u8 *key = (void *)vnic->rss_hash_key;
4636 int k;
4637
4638 if (!bp->rss_hash_key_valid &&
4639 !bp->rss_hash_key_updated) {
4640 get_random_bytes(bp->rss_hash_key,
4641 HW_HASH_KEY_SIZE);
4642 bp->rss_hash_key_updated = true;
4643 }
4644
4645 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4646 HW_HASH_KEY_SIZE);
4647
4648 if (!bp->rss_hash_key_updated)
4649 continue;
4650
4651 bp->rss_hash_key_updated = false;
4652 bp->rss_hash_key_valid = true;
4653
4654 bp->toeplitz_prefix = 0;
4655 for (k = 0; k < 8; k++) {
4656 bp->toeplitz_prefix <<= 8;
4657 bp->toeplitz_prefix |= key[k];
4658 }
4659 } else {
4660 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4661 HW_HASH_KEY_SIZE);
4662 }
4663 }
4664 }
4665 }
4666
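/* Return the number of ring pages used for @ring_size descriptors: at least
 * one page, rounded up to a power of two.
 */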
4667 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4668 {
4669 int pages;
4670
4671 pages = ring_size / desc_per_pg;
4672
4673 if (!pages)
4674 return 1;
4675
4676 pages++;
4677
4678 while (pages & (pages - 1))
4679 pages++;
4680
4681 return pages;
4682 }
4683
4684 void bnxt_set_tpa_flags(struct bnxt *bp)
4685 {
4686 bp->flags &= ~BNXT_FLAG_TPA;
4687 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4688 return;
4689 if (bp->dev->features & NETIF_F_LRO)
4690 bp->flags |= BNXT_FLAG_LRO;
4691 else if (bp->dev->features & NETIF_F_GRO_HW)
4692 bp->flags |= BNXT_FLAG_GRO;
4693 }
4694
4695 static void bnxt_init_ring_params(struct bnxt *bp)
4696 {
4697 unsigned int rx_size;
4698
4699 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4700 /* Try to fit 4 chunks into a 4k page */
4701 rx_size = SZ_1K -
4702 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4703 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4704 }
4705
4706 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4707 * be set on entry.
4708 */
4709 void bnxt_set_ring_params(struct bnxt *bp)
4710 {
4711 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4712 u32 agg_factor = 0, agg_ring_size = 0;
4713
4714 /* 8 for CRC and VLAN */
4715 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4716
4717 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4718 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4719
4720 ring_size = bp->rx_ring_size;
4721 bp->rx_agg_ring_size = 0;
4722 bp->rx_agg_nr_pages = 0;
4723
4724 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4725 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4726
4727 bp->flags &= ~BNXT_FLAG_JUMBO;
4728 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4729 u32 jumbo_factor;
4730
4731 bp->flags |= BNXT_FLAG_JUMBO;
4732 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4733 if (jumbo_factor > agg_factor)
4734 agg_factor = jumbo_factor;
4735 }
4736 if (agg_factor) {
4737 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4738 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4739 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4740 bp->rx_ring_size, ring_size);
4741 bp->rx_ring_size = ring_size;
4742 }
4743 agg_ring_size = ring_size * agg_factor;
4744
4745 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4746 RX_DESC_CNT);
4747 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4748 u32 tmp = agg_ring_size;
4749
4750 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4751 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4752 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4753 tmp, agg_ring_size);
4754 }
4755 bp->rx_agg_ring_size = agg_ring_size;
4756 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4757
4758 if (BNXT_RX_PAGE_MODE(bp)) {
4759 rx_space = PAGE_SIZE;
4760 rx_size = PAGE_SIZE -
4761 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4762 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4763 } else {
4764 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4765 bp->rx_copybreak,
4766 bp->dev->cfg_pending->hds_thresh);
4767 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4768 rx_space = rx_size + NET_SKB_PAD +
4769 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4770 }
4771 }
4772
4773 bp->rx_buf_use_size = rx_size;
4774 bp->rx_buf_size = rx_space;
4775
4776 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4777 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4778
4779 ring_size = bp->tx_ring_size;
4780 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4781 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4782
4783 max_rx_cmpl = bp->rx_ring_size;
4784 /* MAX TPA needs to be added because TPA_START completions are
4785 * immediately recycled, so the TPA completions are not bound by
4786 * the RX ring size.
4787 */
4788 if (bp->flags & BNXT_FLAG_TPA)
4789 max_rx_cmpl += bp->max_tpa;
4790 /* RX and TPA completions are 32-byte, all others are 16-byte */
4791 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4792 bp->cp_ring_size = ring_size;
4793
4794 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4795 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4796 bp->cp_nr_pages = MAX_CP_PAGES;
4797 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4798 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4799 ring_size, bp->cp_ring_size);
4800 }
4801 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4802 bp->cp_ring_mask = bp->cp_bit - 1;
4803 }
4804
4805 /* Changing allocation mode of RX rings.
4806 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4807 */
4808 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4809 {
4810 struct net_device *dev = bp->dev;
4811
4812 if (page_mode) {
4813 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4814 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4815
4816 if (bp->xdp_prog->aux->xdp_has_frags)
4817 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4818 else
4819 dev->max_mtu =
4820 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4821 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4822 bp->flags |= BNXT_FLAG_JUMBO;
4823 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4824 } else {
4825 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4826 bp->rx_skb_func = bnxt_rx_page_skb;
4827 }
4828 bp->rx_dir = DMA_BIDIRECTIONAL;
4829 } else {
4830 dev->max_mtu = bp->max_mtu;
4831 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4832 bp->rx_dir = DMA_FROM_DEVICE;
4833 bp->rx_skb_func = bnxt_rx_skb;
4834 }
4835 }
4836
4837 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4838 {
4839 __bnxt_set_rx_skb_mode(bp, page_mode);
4840
4841 if (!page_mode) {
4842 int rx, tx;
4843
4844 bnxt_get_max_rings(bp, &rx, &tx, true);
4845 if (rx > 1) {
4846 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4847 bp->dev->hw_features |= NETIF_F_LRO;
4848 }
4849 }
4850
4851 /* Update LRO and GRO_HW availability */
4852 netdev_update_features(bp->dev);
4853 }
4854
4855 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4856 {
4857 int i;
4858 struct bnxt_vnic_info *vnic;
4859 struct pci_dev *pdev = bp->pdev;
4860
4861 if (!bp->vnic_info)
4862 return;
4863
4864 for (i = 0; i < bp->nr_vnics; i++) {
4865 vnic = &bp->vnic_info[i];
4866
4867 kfree(vnic->fw_grp_ids);
4868 vnic->fw_grp_ids = NULL;
4869
4870 kfree(vnic->uc_list);
4871 vnic->uc_list = NULL;
4872
4873 if (vnic->mc_list) {
4874 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4875 vnic->mc_list, vnic->mc_list_mapping);
4876 vnic->mc_list = NULL;
4877 }
4878
4879 if (vnic->rss_table) {
4880 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4881 vnic->rss_table,
4882 vnic->rss_table_dma_addr);
4883 vnic->rss_table = NULL;
4884 }
4885
4886 vnic->rss_hash_key = NULL;
4887 vnic->flags = 0;
4888 }
4889 }
4890
4891 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4892 {
4893 int i, rc = 0, size;
4894 struct bnxt_vnic_info *vnic;
4895 struct pci_dev *pdev = bp->pdev;
4896 int max_rings;
4897
4898 for (i = 0; i < bp->nr_vnics; i++) {
4899 vnic = &bp->vnic_info[i];
4900
4901 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4902 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4903
4904 if (mem_size > 0) {
4905 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4906 if (!vnic->uc_list) {
4907 rc = -ENOMEM;
4908 goto out;
4909 }
4910 }
4911 }
4912
4913 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4914 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4915 vnic->mc_list =
4916 dma_alloc_coherent(&pdev->dev,
4917 vnic->mc_list_size,
4918 &vnic->mc_list_mapping,
4919 GFP_KERNEL);
4920 if (!vnic->mc_list) {
4921 rc = -ENOMEM;
4922 goto out;
4923 }
4924 }
4925
4926 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4927 goto vnic_skip_grps;
4928
4929 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4930 max_rings = bp->rx_nr_rings;
4931 else
4932 max_rings = 1;
4933
4934 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4935 if (!vnic->fw_grp_ids) {
4936 rc = -ENOMEM;
4937 goto out;
4938 }
4939 vnic_skip_grps:
4940 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4941 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4942 continue;
4943
4944 /* Allocate rss table and hash key */
4945 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4946 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4947 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4948
4949 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4950 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4951 vnic->rss_table_size,
4952 &vnic->rss_table_dma_addr,
4953 GFP_KERNEL);
4954 if (!vnic->rss_table) {
4955 rc = -ENOMEM;
4956 goto out;
4957 }
4958
4959 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4960 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4961 }
4962 return 0;
4963
4964 out:
4965 return rc;
4966 }
4967
4968 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4969 {
4970 struct bnxt_hwrm_wait_token *token;
4971
4972 dma_pool_destroy(bp->hwrm_dma_pool);
4973 bp->hwrm_dma_pool = NULL;
4974
4975 rcu_read_lock();
4976 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4977 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4978 rcu_read_unlock();
4979 }
4980
4981 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4982 {
4983 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4984 BNXT_HWRM_DMA_SIZE,
4985 BNXT_HWRM_DMA_ALIGN, 0);
4986 if (!bp->hwrm_dma_pool)
4987 return -ENOMEM;
4988
4989 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4990
4991 return 0;
4992 }
4993
4994 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4995 {
4996 kfree(stats->hw_masks);
4997 stats->hw_masks = NULL;
4998 kfree(stats->sw_stats);
4999 stats->sw_stats = NULL;
5000 if (stats->hw_stats) {
5001 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5002 stats->hw_stats_map);
5003 stats->hw_stats = NULL;
5004 }
5005 }
5006
5007 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5008 bool alloc_masks)
5009 {
5010 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5011 &stats->hw_stats_map, GFP_KERNEL);
5012 if (!stats->hw_stats)
5013 return -ENOMEM;
5014
5015 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5016 if (!stats->sw_stats)
5017 goto stats_mem_err;
5018
5019 if (alloc_masks) {
5020 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5021 if (!stats->hw_masks)
5022 goto stats_mem_err;
5023 }
5024 return 0;
5025
5026 stats_mem_err:
5027 bnxt_free_stats_mem(bp, stats);
5028 return -ENOMEM;
5029 }
5030
5031 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5032 {
5033 int i;
5034
5035 for (i = 0; i < count; i++)
5036 mask_arr[i] = mask;
5037 }
5038
5039 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5040 {
5041 int i;
5042
5043 for (i = 0; i < count; i++)
5044 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5045 }
5046
5047 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5048 struct bnxt_stats_mem *stats)
5049 {
5050 struct hwrm_func_qstats_ext_output *resp;
5051 struct hwrm_func_qstats_ext_input *req;
5052 __le64 *hw_masks;
5053 int rc;
5054
5055 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5056 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5057 return -EOPNOTSUPP;
5058
5059 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5060 if (rc)
5061 return rc;
5062
5063 req->fid = cpu_to_le16(0xffff);
5064 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5065
5066 resp = hwrm_req_hold(bp, req);
5067 rc = hwrm_req_send(bp, req);
5068 if (!rc) {
5069 hw_masks = &resp->rx_ucast_pkts;
5070 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5071 }
5072 hwrm_req_drop(bp, req);
5073 return rc;
5074 }
5075
5076 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5077 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5078
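/* Query the firmware for the hardware counter widths (masks) of the ring
 * and port statistics; if the query is not supported, fall back to fixed
 * 40/48-bit or full-width masks.
 */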
5079 static void bnxt_init_stats(struct bnxt *bp)
5080 {
5081 struct bnxt_napi *bnapi = bp->bnapi[0];
5082 struct bnxt_cp_ring_info *cpr;
5083 struct bnxt_stats_mem *stats;
5084 __le64 *rx_stats, *tx_stats;
5085 int rc, rx_count, tx_count;
5086 u64 *rx_masks, *tx_masks;
5087 u64 mask;
5088 u8 flags;
5089
5090 cpr = &bnapi->cp_ring;
5091 stats = &cpr->stats;
5092 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5093 if (rc) {
5094 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5095 mask = (1ULL << 48) - 1;
5096 else
5097 mask = -1ULL;
5098 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5099 }
5100 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5101 stats = &bp->port_stats;
5102 rx_stats = stats->hw_stats;
5103 rx_masks = stats->hw_masks;
5104 rx_count = sizeof(struct rx_port_stats) / 8;
5105 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5106 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5107 tx_count = sizeof(struct tx_port_stats) / 8;
5108
5109 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5110 rc = bnxt_hwrm_port_qstats(bp, flags);
5111 if (rc) {
5112 mask = (1ULL << 40) - 1;
5113
5114 bnxt_fill_masks(rx_masks, mask, rx_count);
5115 bnxt_fill_masks(tx_masks, mask, tx_count);
5116 } else {
5117 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5118 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5119 bnxt_hwrm_port_qstats(bp, 0);
5120 }
5121 }
5122 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5123 stats = &bp->rx_port_stats_ext;
5124 rx_stats = stats->hw_stats;
5125 rx_masks = stats->hw_masks;
5126 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5127 stats = &bp->tx_port_stats_ext;
5128 tx_stats = stats->hw_stats;
5129 tx_masks = stats->hw_masks;
5130 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5131
5132 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5133 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5134 if (rc) {
5135 mask = (1ULL << 40) - 1;
5136
5137 bnxt_fill_masks(rx_masks, mask, rx_count);
5138 if (tx_stats)
5139 bnxt_fill_masks(tx_masks, mask, tx_count);
5140 } else {
5141 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5142 if (tx_stats)
5143 bnxt_copy_hw_masks(tx_masks, tx_stats,
5144 tx_count);
5145 bnxt_hwrm_port_qstats_ext(bp, 0);
5146 }
5147 }
5148 }
5149
5150 static void bnxt_free_port_stats(struct bnxt *bp)
5151 {
5152 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5153 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5154
5155 bnxt_free_stats_mem(bp, &bp->port_stats);
5156 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5157 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5158 }
5159
5160 static void bnxt_free_ring_stats(struct bnxt *bp)
5161 {
5162 int i;
5163
5164 if (!bp->bnapi)
5165 return;
5166
5167 for (i = 0; i < bp->cp_nr_rings; i++) {
5168 struct bnxt_napi *bnapi = bp->bnapi[i];
5169 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5170
5171 bnxt_free_stats_mem(bp, &cpr->stats);
5172
5173 kfree(cpr->sw_stats);
5174 cpr->sw_stats = NULL;
5175 }
5176 }
5177
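/* Allocate the per-completion-ring HW/SW statistics buffers and, when
 * applicable, the port statistics and extended port statistics blocks
 * supported by the firmware.
 */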
5178 static int bnxt_alloc_stats(struct bnxt *bp)
5179 {
5180 u32 size, i;
5181 int rc;
5182
5183 size = bp->hw_ring_stats_size;
5184
5185 for (i = 0; i < bp->cp_nr_rings; i++) {
5186 struct bnxt_napi *bnapi = bp->bnapi[i];
5187 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5188
5189 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5190 if (!cpr->sw_stats)
5191 return -ENOMEM;
5192
5193 cpr->stats.len = size;
5194 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5195 if (rc)
5196 return rc;
5197
5198 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5199 }
5200
5201 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5202 return 0;
5203
5204 if (bp->port_stats.hw_stats)
5205 goto alloc_ext_stats;
5206
5207 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5208 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5209 if (rc)
5210 return rc;
5211
5212 bp->flags |= BNXT_FLAG_PORT_STATS;
5213
5214 alloc_ext_stats:
5215 /* Display extended statistics only if FW supports it */
5216 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5217 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5218 return 0;
5219
5220 if (bp->rx_port_stats_ext.hw_stats)
5221 goto alloc_tx_ext_stats;
5222
5223 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5224 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5225 /* Extended stats are optional */
5226 if (rc)
5227 return 0;
5228
5229 alloc_tx_ext_stats:
5230 if (bp->tx_port_stats_ext.hw_stats)
5231 return 0;
5232
5233 if (bp->hwrm_spec_code >= 0x10902 ||
5234 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5235 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5236 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5237 /* Extended stats are optional */
5238 if (rc)
5239 return 0;
5240 }
5241 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5242 return 0;
5243 }
5244
5245 static void bnxt_clear_ring_indices(struct bnxt *bp)
5246 {
5247 int i, j;
5248
5249 if (!bp->bnapi)
5250 return;
5251
5252 for (i = 0; i < bp->cp_nr_rings; i++) {
5253 struct bnxt_napi *bnapi = bp->bnapi[i];
5254 struct bnxt_cp_ring_info *cpr;
5255 struct bnxt_rx_ring_info *rxr;
5256 struct bnxt_tx_ring_info *txr;
5257
5258 if (!bnapi)
5259 continue;
5260
5261 cpr = &bnapi->cp_ring;
5262 cpr->cp_raw_cons = 0;
5263
5264 bnxt_for_each_napi_tx(j, bnapi, txr) {
5265 txr->tx_prod = 0;
5266 txr->tx_cons = 0;
5267 txr->tx_hw_cons = 0;
5268 }
5269
5270 rxr = bnapi->rx_ring;
5271 if (rxr) {
5272 rxr->rx_prod = 0;
5273 rxr->rx_agg_prod = 0;
5274 rxr->rx_sw_agg_prod = 0;
5275 rxr->rx_next_cons = 0;
5276 }
5277 bnapi->events = 0;
5278 }
5279 }
5280
5281 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5282 {
5283 u8 type = fltr->type, flags = fltr->flags;
5284
5285 INIT_LIST_HEAD(&fltr->list);
5286 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5287 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5288 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5289 }
5290
5291 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5292 {
5293 if (!list_empty(&fltr->list))
5294 list_del_init(&fltr->list);
5295 }
5296
5297 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5298 {
5299 struct bnxt_filter_base *usr_fltr, *tmp;
5300
5301 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5302 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5303 continue;
5304 bnxt_del_one_usr_fltr(bp, usr_fltr);
5305 }
5306 }
5307
5308 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5309 {
5310 hlist_del(&fltr->hash);
5311 bnxt_del_one_usr_fltr(bp, fltr);
5312 if (fltr->flags) {
5313 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5314 bp->ntp_fltr_count--;
5315 }
5316 kfree(fltr);
5317 }
5318
5319 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5320 {
5321 int i;
5322
5323 netdev_assert_locked(bp->dev);
5324
5325 /* We hold the netdev instance lock and all our NAPIs have been
5326 * disabled, so it is safe to delete the hash table.
5327 */
5328 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5329 struct hlist_head *head;
5330 struct hlist_node *tmp;
5331 struct bnxt_ntuple_filter *fltr;
5332
5333 head = &bp->ntp_fltr_hash_tbl[i];
5334 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5335 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5336 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5337 !list_empty(&fltr->base.list)))
5338 continue;
5339 bnxt_del_fltr(bp, &fltr->base);
5340 }
5341 }
5342 if (!all)
5343 return;
5344
5345 bitmap_free(bp->ntp_fltr_bmap);
5346 bp->ntp_fltr_bmap = NULL;
5347 bp->ntp_fltr_count = 0;
5348 }
5349
5350 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5351 {
5352 int i, rc = 0;
5353
5354 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5355 return 0;
5356
5357 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5358 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5359
5360 bp->ntp_fltr_count = 0;
5361 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5362
5363 if (!bp->ntp_fltr_bmap)
5364 rc = -ENOMEM;
5365
5366 return rc;
5367 }
5368
5369 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5370 {
5371 int i;
5372
5373 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5374 struct hlist_head *head;
5375 struct hlist_node *tmp;
5376 struct bnxt_l2_filter *fltr;
5377
5378 head = &bp->l2_fltr_hash_tbl[i];
5379 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5380 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5381 !list_empty(&fltr->base.list)))
5382 continue;
5383 bnxt_del_fltr(bp, &fltr->base);
5384 }
5385 }
5386 }
5387
5388 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5389 {
5390 int i;
5391
5392 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5393 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5394 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5395 }
5396
5397 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5398 {
5399 bnxt_free_vnic_attributes(bp);
5400 bnxt_free_tx_rings(bp);
5401 bnxt_free_rx_rings(bp);
5402 bnxt_free_cp_rings(bp);
5403 bnxt_free_all_cp_arrays(bp);
5404 bnxt_free_ntp_fltrs(bp, false);
5405 bnxt_free_l2_filters(bp, false);
5406 if (irq_re_init) {
5407 bnxt_free_ring_stats(bp);
5408 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5409 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5410 bnxt_free_port_stats(bp);
5411 bnxt_free_ring_grps(bp);
5412 bnxt_free_vnics(bp);
5413 kfree(bp->tx_ring_map);
5414 bp->tx_ring_map = NULL;
5415 kfree(bp->tx_ring);
5416 bp->tx_ring = NULL;
5417 kfree(bp->rx_ring);
5418 bp->rx_ring = NULL;
5419 kfree(bp->bnapi);
5420 bp->bnapi = NULL;
5421 } else {
5422 bnxt_clear_ring_indices(bp);
5423 }
5424 }
5425
5426 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5427 {
5428 int i, j, rc, size, arr_size;
5429 void *bnapi;
5430
5431 if (irq_re_init) {
5432 /* Allocate bnapi mem pointer array and mem block for
5433 * all queues
5434 */
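/* A single allocation holds the pointer array followed by the
 * bnxt_napi structs themselves, each region rounded up to an L1
 * cache line:
 *   [bnxt_napi *ptrs[cp_nr_rings]][bnxt_napi 0][bnxt_napi 1]...
 */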
5435 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5436 bp->cp_nr_rings);
5437 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5438 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5439 if (!bnapi)
5440 return -ENOMEM;
5441
5442 bp->bnapi = bnapi;
5443 bnapi += arr_size;
5444 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5445 bp->bnapi[i] = bnapi;
5446 bp->bnapi[i]->index = i;
5447 bp->bnapi[i]->bp = bp;
5448 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5449 struct bnxt_cp_ring_info *cpr =
5450 &bp->bnapi[i]->cp_ring;
5451
5452 cpr->cp_ring_struct.ring_mem.flags =
5453 BNXT_RMEM_RING_PTE_FLAG;
5454 }
5455 }
5456
5457 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5458 sizeof(struct bnxt_rx_ring_info),
5459 GFP_KERNEL);
5460 if (!bp->rx_ring)
5461 return -ENOMEM;
5462
5463 for (i = 0; i < bp->rx_nr_rings; i++) {
5464 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5465
5466 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5467 rxr->rx_ring_struct.ring_mem.flags =
5468 BNXT_RMEM_RING_PTE_FLAG;
5469 rxr->rx_agg_ring_struct.ring_mem.flags =
5470 BNXT_RMEM_RING_PTE_FLAG;
5471 } else {
5472 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5473 }
5474 rxr->bnapi = bp->bnapi[i];
5475 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5476 }
5477
5478 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5479 sizeof(struct bnxt_tx_ring_info),
5480 GFP_KERNEL);
5481 if (!bp->tx_ring)
5482 return -ENOMEM;
5483
5484 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5485 GFP_KERNEL);
5486
5487 if (!bp->tx_ring_map)
5488 return -ENOMEM;
5489
5490 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5491 j = 0;
5492 else
5493 j = bp->rx_nr_rings;
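/* j indexes the NAPI that the next TX ring will be attached to:
 * with shared rings the TX rings share the RX NAPIs starting at
 * index 0, otherwise the TX NAPIs follow the bp->rx_nr_rings RX
 * NAPIs.
 */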
5494
5495 for (i = 0; i < bp->tx_nr_rings; i++) {
5496 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5497 struct bnxt_napi *bnapi2;
5498
5499 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5500 txr->tx_ring_struct.ring_mem.flags =
5501 BNXT_RMEM_RING_PTE_FLAG;
5502 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5503 if (i >= bp->tx_nr_rings_xdp) {
5504 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5505
5506 bnapi2 = bp->bnapi[k];
5507 txr->txq_index = i - bp->tx_nr_rings_xdp;
5508 txr->tx_napi_idx =
5509 BNXT_RING_TO_TC(bp, txr->txq_index);
5510 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5511 bnapi2->tx_int = bnxt_tx_int;
5512 } else {
5513 bnapi2 = bp->bnapi[j];
5514 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5515 bnapi2->tx_ring[0] = txr;
5516 bnapi2->tx_int = bnxt_tx_int_xdp;
5517 j++;
5518 }
5519 txr->bnapi = bnapi2;
5520 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5521 txr->tx_cpr = &bnapi2->cp_ring;
5522 }
5523
5524 rc = bnxt_alloc_stats(bp);
5525 if (rc)
5526 goto alloc_mem_err;
5527 bnxt_init_stats(bp);
5528
5529 rc = bnxt_alloc_ntp_fltrs(bp);
5530 if (rc)
5531 goto alloc_mem_err;
5532
5533 rc = bnxt_alloc_vnics(bp);
5534 if (rc)
5535 goto alloc_mem_err;
5536 }
5537
5538 rc = bnxt_alloc_all_cp_arrays(bp);
5539 if (rc)
5540 goto alloc_mem_err;
5541
5542 bnxt_init_ring_struct(bp);
5543
5544 rc = bnxt_alloc_rx_rings(bp);
5545 if (rc)
5546 goto alloc_mem_err;
5547
5548 rc = bnxt_alloc_tx_rings(bp);
5549 if (rc)
5550 goto alloc_mem_err;
5551
5552 rc = bnxt_alloc_cp_rings(bp);
5553 if (rc)
5554 goto alloc_mem_err;
5555
5556 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5557 BNXT_VNIC_MCAST_FLAG |
5558 BNXT_VNIC_UCAST_FLAG;
5559 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5560 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5561 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5562
5563 rc = bnxt_alloc_vnic_attributes(bp);
5564 if (rc)
5565 goto alloc_mem_err;
5566 return 0;
5567
5568 alloc_mem_err:
5569 bnxt_free_mem(bp, true);
5570 return rc;
5571 }
5572
5573 static void bnxt_disable_int(struct bnxt *bp)
5574 {
5575 int i;
5576
5577 if (!bp->bnapi)
5578 return;
5579
5580 for (i = 0; i < bp->cp_nr_rings; i++) {
5581 struct bnxt_napi *bnapi = bp->bnapi[i];
5582 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5583 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5584
5585 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5586 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5587 }
5588 }
5589
5590 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5591 {
5592 struct bnxt_napi *bnapi = bp->bnapi[n];
5593 struct bnxt_cp_ring_info *cpr;
5594
5595 cpr = &bnapi->cp_ring;
5596 return cpr->cp_ring_struct.map_idx;
5597 }
5598
5599 static void bnxt_disable_int_sync(struct bnxt *bp)
5600 {
5601 int i;
5602
5603 if (!bp->irq_tbl)
5604 return;
5605
5606 atomic_inc(&bp->intr_sem);
5607
5608 bnxt_disable_int(bp);
5609 for (i = 0; i < bp->cp_nr_rings; i++) {
5610 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5611
5612 synchronize_irq(bp->irq_tbl[map_idx].vector);
5613 }
5614 }
5615
5616 static void bnxt_enable_int(struct bnxt *bp)
5617 {
5618 int i;
5619
5620 atomic_set(&bp->intr_sem, 0);
5621 for (i = 0; i < bp->cp_nr_rings; i++) {
5622 struct bnxt_napi *bnapi = bp->bnapi[i];
5623 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5624
5625 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5626 }
5627 }
5628
5629 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5630 bool async_only)
5631 {
5632 DECLARE_BITMAP(async_events_bmap, 256);
5633 u32 *events = (u32 *)async_events_bmap;
5634 struct hwrm_func_drv_rgtr_output *resp;
5635 struct hwrm_func_drv_rgtr_input *req;
5636 u32 flags;
5637 int rc, i;
5638
5639 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5640 if (rc)
5641 return rc;
5642
5643 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5644 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5645 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5646
5647 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5648 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5649 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5650 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5651 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5652 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5653 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5654 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5655 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5656 req->flags = cpu_to_le32(flags);
5657 req->ver_maj_8b = DRV_VER_MAJ;
5658 req->ver_min_8b = DRV_VER_MIN;
5659 req->ver_upd_8b = DRV_VER_UPD;
5660 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5661 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5662 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5663
5664 if (BNXT_PF(bp)) {
5665 u32 data[8];
5666 int i;
5667
5668 memset(data, 0, sizeof(data));
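/* Build a 256-bit bitmap (8 x 32-bit words) of HWRM command IDs
 * that the firmware should forward from VFs to the PF driver:
 * command ID 'cmd' sets bit (cmd % 32) of word (cmd / 32).
 */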
5669 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5670 u16 cmd = bnxt_vf_req_snif[i];
5671 unsigned int bit, idx;
5672
5673 idx = cmd / 32;
5674 bit = cmd % 32;
5675 data[idx] |= 1 << bit;
5676 }
5677
5678 for (i = 0; i < 8; i++)
5679 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5680
5681 req->enables |=
5682 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5683 }
5684
5685 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5686 req->flags |= cpu_to_le32(
5687 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5688
5689 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5690 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5691 u16 event_id = bnxt_async_events_arr[i];
5692
5693 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5694 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5695 continue;
5696 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5697 !bp->ptp_cfg)
5698 continue;
5699 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5700 }
5701 if (bmap && bmap_size) {
5702 for (i = 0; i < bmap_size; i++) {
5703 if (test_bit(i, bmap))
5704 __set_bit(i, async_events_bmap);
5705 }
5706 }
5707 for (i = 0; i < 8; i++)
5708 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5709
5710 if (async_only)
5711 req->enables =
5712 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5713
5714 resp = hwrm_req_hold(bp, req);
5715 rc = hwrm_req_send(bp, req);
5716 if (!rc) {
5717 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5718 if (resp->flags &
5719 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5720 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5721 }
5722 hwrm_req_drop(bp, req);
5723 return rc;
5724 }
5725
5726 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5727 {
5728 struct hwrm_func_drv_unrgtr_input *req;
5729 int rc;
5730
5731 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5732 return 0;
5733
5734 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5735 if (rc)
5736 return rc;
5737 return hwrm_req_send(bp, req);
5738 }
5739
5740 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5741
5742 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5743 {
5744 struct hwrm_tunnel_dst_port_free_input *req;
5745 int rc;
5746
5747 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5748 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5749 return 0;
5750 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5751 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5752 return 0;
5753
5754 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5755 if (rc)
5756 return rc;
5757
5758 req->tunnel_type = tunnel_type;
5759
5760 switch (tunnel_type) {
5761 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5762 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5763 bp->vxlan_port = 0;
5764 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5765 break;
5766 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5767 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5768 bp->nge_port = 0;
5769 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5770 break;
5771 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5772 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5773 bp->vxlan_gpe_port = 0;
5774 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5775 break;
5776 default:
5777 break;
5778 }
5779
5780 rc = hwrm_req_send(bp, req);
5781 if (rc)
5782 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5783 rc);
5784 if (bp->flags & BNXT_FLAG_TPA)
5785 bnxt_set_tpa(bp, true);
5786 return rc;
5787 }
5788
5789 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5790 u8 tunnel_type)
5791 {
5792 struct hwrm_tunnel_dst_port_alloc_output *resp;
5793 struct hwrm_tunnel_dst_port_alloc_input *req;
5794 int rc;
5795
5796 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5797 if (rc)
5798 return rc;
5799
5800 req->tunnel_type = tunnel_type;
5801 req->tunnel_dst_port_val = port;
5802
5803 resp = hwrm_req_hold(bp, req);
5804 rc = hwrm_req_send(bp, req);
5805 if (rc) {
5806 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5807 rc);
5808 goto err_out;
5809 }
5810
5811 switch (tunnel_type) {
5812 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5813 bp->vxlan_port = port;
5814 bp->vxlan_fw_dst_port_id =
5815 le16_to_cpu(resp->tunnel_dst_port_id);
5816 break;
5817 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5818 bp->nge_port = port;
5819 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5820 break;
5821 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5822 bp->vxlan_gpe_port = port;
5823 bp->vxlan_gpe_fw_dst_port_id =
5824 le16_to_cpu(resp->tunnel_dst_port_id);
5825 break;
5826 default:
5827 break;
5828 }
5829 if (bp->flags & BNXT_FLAG_TPA)
5830 bnxt_set_tpa(bp, true);
5831
5832 err_out:
5833 hwrm_req_drop(bp, req);
5834 return rc;
5835 }
5836
5837 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5838 {
5839 struct hwrm_cfa_l2_set_rx_mask_input *req;
5840 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5841 int rc;
5842
5843 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5844 if (rc)
5845 return rc;
5846
5847 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5848 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5849 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5850 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5851 }
5852 req->mask = cpu_to_le32(vnic->rx_mask);
5853 return hwrm_req_send_silent(bp, req);
5854 }
5855
5856 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5857 {
5858 if (!atomic_dec_and_test(&fltr->refcnt))
5859 return;
5860 spin_lock_bh(&bp->ntp_fltr_lock);
5861 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5862 spin_unlock_bh(&bp->ntp_fltr_lock);
5863 return;
5864 }
5865 hlist_del_rcu(&fltr->base.hash);
5866 bnxt_del_one_usr_fltr(bp, &fltr->base);
5867 if (fltr->base.flags) {
5868 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5869 bp->ntp_fltr_count--;
5870 }
5871 spin_unlock_bh(&bp->ntp_fltr_lock);
5872 kfree_rcu(fltr, base.rcu);
5873 }
5874
5875 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5876 struct bnxt_l2_key *key,
5877 u32 idx)
5878 {
5879 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5880 struct bnxt_l2_filter *fltr;
5881
5882 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5883 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5884
5885 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5886 l2_key->vlan == key->vlan)
5887 return fltr;
5888 }
5889 return NULL;
5890 }
5891
5892 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5893 struct bnxt_l2_key *key,
5894 u32 idx)
5895 {
5896 struct bnxt_l2_filter *fltr = NULL;
5897
5898 rcu_read_lock();
5899 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5900 if (fltr)
5901 atomic_inc(&fltr->refcnt);
5902 rcu_read_unlock();
5903 return fltr;
5904 }
5905
5906 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5907 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5908 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5909 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5910 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5911
5912 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5913 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5914 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5915 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5916 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5917
5918 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5919 {
5920 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5921 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5922 return sizeof(fkeys->addrs.v4addrs) +
5923 sizeof(fkeys->ports);
5924
5925 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5926 return sizeof(fkeys->addrs.v4addrs);
5927 }
5928
5929 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5930 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5931 return sizeof(fkeys->addrs.v6addrs) +
5932 sizeof(fkeys->ports);
5933
5934 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5935 return sizeof(fkeys->addrs.v6addrs);
5936 }
5937
5938 return 0;
5939 }
5940
5941 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5942 const unsigned char *key)
5943 {
5944 u64 prefix = bp->toeplitz_prefix, hash = 0;
5945 struct bnxt_ipv4_tuple tuple4;
5946 struct bnxt_ipv6_tuple tuple6;
5947 int i, j, len = 0;
5948 u8 *four_tuple;
5949
5950 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5951 if (!len)
5952 return 0;
5953
5954 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5955 tuple4.v4addrs = fkeys->addrs.v4addrs;
5956 tuple4.ports = fkeys->ports;
5957 four_tuple = (unsigned char *)&tuple4;
5958 } else {
5959 tuple6.v6addrs = fkeys->addrs.v6addrs;
5960 tuple6.ports = fkeys->ports;
5961 four_tuple = (unsigned char *)&tuple6;
5962 }
5963
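/* Bit-serial Toeplitz: for every '1' bit of the input tuple, XOR
 * the current key window (kept in the upper bits of 'prefix') into
 * 'hash', then slide the window left one bit, pulling in the next
 * key byte after every 8 input bits.
 */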
5964 for (i = 0, j = 8; i < len; i++, j++) {
5965 u8 byte = four_tuple[i];
5966 int bit;
5967
5968 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5969 if (byte & 0x80)
5970 hash ^= prefix;
5971 }
5972 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5973 }
5974
5975 /* The valid part of the hash is in the upper 32 bits. */
5976 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5977 }
5978
5979 #ifdef CONFIG_RFS_ACCEL
5980 static struct bnxt_l2_filter *
5981 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5982 {
5983 struct bnxt_l2_filter *fltr;
5984 u32 idx;
5985
5986 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5987 BNXT_L2_FLTR_HASH_MASK;
5988 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5989 return fltr;
5990 }
5991 #endif
5992
5993 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5994 struct bnxt_l2_key *key, u32 idx)
5995 {
5996 struct hlist_head *head;
5997
5998 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5999 fltr->l2_key.vlan = key->vlan;
6000 fltr->base.type = BNXT_FLTR_TYPE_L2;
6001 if (fltr->base.flags) {
6002 int bit_id;
6003
6004 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6005 bp->max_fltr, 0);
6006 if (bit_id < 0)
6007 return -ENOMEM;
6008 fltr->base.sw_id = (u16)bit_id;
6009 bp->ntp_fltr_count++;
6010 }
6011 head = &bp->l2_fltr_hash_tbl[idx];
6012 hlist_add_head_rcu(&fltr->base.hash, head);
6013 bnxt_insert_usr_fltr(bp, &fltr->base);
6014 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6015 atomic_set(&fltr->refcnt, 1);
6016 return 0;
6017 }
6018
6019 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6020 struct bnxt_l2_key *key,
6021 gfp_t gfp)
6022 {
6023 struct bnxt_l2_filter *fltr;
6024 u32 idx;
6025 int rc;
6026
6027 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6028 BNXT_L2_FLTR_HASH_MASK;
6029 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6030 if (fltr)
6031 return fltr;
6032
6033 fltr = kzalloc(sizeof(*fltr), gfp);
6034 if (!fltr)
6035 return ERR_PTR(-ENOMEM);
6036 spin_lock_bh(&bp->ntp_fltr_lock);
6037 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6038 spin_unlock_bh(&bp->ntp_fltr_lock);
6039 if (rc) {
6040 bnxt_del_l2_filter(bp, fltr);
6041 fltr = ERR_PTR(rc);
6042 }
6043 return fltr;
6044 }
6045
6046 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6047 struct bnxt_l2_key *key,
6048 u16 flags)
6049 {
6050 struct bnxt_l2_filter *fltr;
6051 u32 idx;
6052 int rc;
6053
6054 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6055 BNXT_L2_FLTR_HASH_MASK;
6056 spin_lock_bh(&bp->ntp_fltr_lock);
6057 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6058 if (fltr) {
6059 fltr = ERR_PTR(-EEXIST);
6060 goto l2_filter_exit;
6061 }
6062 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
6063 if (!fltr) {
6064 fltr = ERR_PTR(-ENOMEM);
6065 goto l2_filter_exit;
6066 }
6067 fltr->base.flags = flags;
6068 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6069 if (rc) {
6070 spin_unlock_bh(&bp->ntp_fltr_lock);
6071 bnxt_del_l2_filter(bp, fltr);
6072 return ERR_PTR(rc);
6073 }
6074
6075 l2_filter_exit:
6076 spin_unlock_bh(&bp->ntp_fltr_lock);
6077 return fltr;
6078 }
6079
6080 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6081 {
6082 #ifdef CONFIG_BNXT_SRIOV
6083 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6084
6085 return vf->fw_fid;
6086 #else
6087 return INVALID_HW_RING_ID;
6088 #endif
6089 }
6090
6091 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6092 {
6093 struct hwrm_cfa_l2_filter_free_input *req;
6094 u16 target_id = 0xffff;
6095 int rc;
6096
6097 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6098 struct bnxt_pf_info *pf = &bp->pf;
6099
6100 if (fltr->base.vf_idx >= pf->active_vfs)
6101 return -EINVAL;
6102
6103 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6104 if (target_id == INVALID_HW_RING_ID)
6105 return -EINVAL;
6106 }
6107
6108 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6109 if (rc)
6110 return rc;
6111
6112 req->target_id = cpu_to_le16(target_id);
6113 req->l2_filter_id = fltr->base.filter_id;
6114 return hwrm_req_send(bp, req);
6115 }
6116
6117 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6118 {
6119 struct hwrm_cfa_l2_filter_alloc_output *resp;
6120 struct hwrm_cfa_l2_filter_alloc_input *req;
6121 u16 target_id = 0xffff;
6122 int rc;
6123
6124 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6125 struct bnxt_pf_info *pf = &bp->pf;
6126
6127 if (fltr->base.vf_idx >= pf->active_vfs)
6128 return -EINVAL;
6129
6130 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6131 }
6132 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6133 if (rc)
6134 return rc;
6135
6136 req->target_id = cpu_to_le16(target_id);
6137 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6138
6139 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6140 req->flags |=
6141 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6142 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6143 req->enables =
6144 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6145 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6146 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6147 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6148 eth_broadcast_addr(req->l2_addr_mask);
6149
6150 if (fltr->l2_key.vlan) {
6151 req->enables |=
6152 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6153 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6154 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6155 req->num_vlans = 1;
6156 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6157 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6158 }
6159
6160 resp = hwrm_req_hold(bp, req);
6161 rc = hwrm_req_send(bp, req);
6162 if (!rc) {
6163 fltr->base.filter_id = resp->l2_filter_id;
6164 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6165 }
6166 hwrm_req_drop(bp, req);
6167 return rc;
6168 }
6169
6170 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6171 struct bnxt_ntuple_filter *fltr)
6172 {
6173 struct hwrm_cfa_ntuple_filter_free_input *req;
6174 int rc;
6175
6176 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6177 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6178 if (rc)
6179 return rc;
6180
6181 req->ntuple_filter_id = fltr->base.filter_id;
6182 return hwrm_req_send(bp, req);
6183 }
6184
6185 #define BNXT_NTP_FLTR_FLAGS \
6186 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6187 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6188 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6189 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6190 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6191 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6192 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6193 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6194 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6195 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6196 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6197 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6198 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6199
6200 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6201 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6202
6203 void bnxt_fill_ipv6_mask(__be32 mask[4])
6204 {
6205 int i;
6206
6207 for (i = 0; i < 4; i++)
6208 mask[i] = cpu_to_be32(~0);
6209 }
6210
6211 static void
6212 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6213 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6214 struct bnxt_ntuple_filter *fltr)
6215 {
6216 u16 rxq = fltr->base.rxq;
6217
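/* Select the filter destination: filters tied to an RSS context
 * steer to that context's VNIC; chips with a dedicated ntuple VNIC
 * take the RX ring index via the RFS ring table; older chips use
 * the ring index directly as the destination ID.
 */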
6218 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6219 struct ethtool_rxfh_context *ctx;
6220 struct bnxt_rss_ctx *rss_ctx;
6221 struct bnxt_vnic_info *vnic;
6222
6223 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6224 fltr->base.fw_vnic_id);
6225 if (ctx) {
6226 rss_ctx = ethtool_rxfh_context_priv(ctx);
6227 vnic = &rss_ctx->vnic;
6228
6229 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6230 }
6231 return;
6232 }
6233 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6234 struct bnxt_vnic_info *vnic;
6235 u32 enables;
6236
6237 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6238 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6239 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6240 req->enables |= cpu_to_le32(enables);
6241 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6242 } else {
6243 u32 flags;
6244
6245 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6246 req->flags |= cpu_to_le32(flags);
6247 req->dst_id = cpu_to_le16(rxq);
6248 }
6249 }
6250
6251 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6252 struct bnxt_ntuple_filter *fltr)
6253 {
6254 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6255 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6256 struct bnxt_flow_masks *masks = &fltr->fmasks;
6257 struct flow_keys *keys = &fltr->fkeys;
6258 struct bnxt_l2_filter *l2_fltr;
6259 struct bnxt_vnic_info *vnic;
6260 int rc;
6261
6262 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6263 if (rc)
6264 return rc;
6265
6266 l2_fltr = fltr->l2_fltr;
6267 req->l2_filter_id = l2_fltr->base.filter_id;
6268
6269 if (fltr->base.flags & BNXT_ACT_DROP) {
6270 req->flags =
6271 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6272 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6273 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6274 } else {
6275 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6276 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6277 }
6278 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6279
6280 req->ethertype = htons(ETH_P_IP);
6281 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6282 req->ip_protocol = keys->basic.ip_proto;
6283
6284 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6285 req->ethertype = htons(ETH_P_IPV6);
6286 req->ip_addr_type =
6287 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6288 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6289 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6290 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6291 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6292 } else {
6293 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6294 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6295 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6296 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6297 }
6298 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6299 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6300 req->tunnel_type =
6301 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6302 }
6303
6304 req->src_port = keys->ports.src;
6305 req->src_port_mask = masks->ports.src;
6306 req->dst_port = keys->ports.dst;
6307 req->dst_port_mask = masks->ports.dst;
6308
6309 resp = hwrm_req_hold(bp, req);
6310 rc = hwrm_req_send(bp, req);
6311 if (!rc)
6312 fltr->base.filter_id = resp->ntuple_filter_id;
6313 hwrm_req_drop(bp, req);
6314 return rc;
6315 }
6316
6317 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6318 const u8 *mac_addr)
6319 {
6320 struct bnxt_l2_filter *fltr;
6321 struct bnxt_l2_key key;
6322 int rc;
6323
6324 ether_addr_copy(key.dst_mac_addr, mac_addr);
6325 key.vlan = 0;
6326 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6327 if (IS_ERR(fltr))
6328 return PTR_ERR(fltr);
6329
6330 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6331 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6332 if (rc)
6333 bnxt_del_l2_filter(bp, fltr);
6334 else
6335 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6336 return rc;
6337 }
6338
6339 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6340 {
6341 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6342
6343 /* Any associated ntuple filters will also be cleared by firmware. */
6344 for (i = 0; i < num_of_vnics; i++) {
6345 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6346
6347 for (j = 0; j < vnic->uc_filter_count; j++) {
6348 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6349
6350 bnxt_hwrm_l2_filter_free(bp, fltr);
6351 bnxt_del_l2_filter(bp, fltr);
6352 }
6353 vnic->uc_filter_count = 0;
6354 }
6355 }
6356
6357 #define BNXT_DFLT_TUNL_TPA_BMAP \
6358 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6359 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6360 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6361
6362 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6363 struct hwrm_vnic_tpa_cfg_input *req)
6364 {
6365 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6366
6367 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6368 return;
6369
6370 if (bp->vxlan_port)
6371 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6372 if (bp->vxlan_gpe_port)
6373 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6374 if (bp->nge_port)
6375 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6376
6377 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6378 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6379 }
6380
6381 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6382 u32 tpa_flags)
6383 {
6384 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6385 struct hwrm_vnic_tpa_cfg_input *req;
6386 int rc;
6387
6388 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6389 return 0;
6390
6391 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6392 if (rc)
6393 return rc;
6394
6395 if (tpa_flags) {
6396 u16 mss = bp->dev->mtu - 40;
6397 u32 nsegs, n, segs = 0, flags;
6398
6399 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6400 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6401 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6402 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6403 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6404 if (tpa_flags & BNXT_FLAG_GRO)
6405 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6406
6407 req->flags = cpu_to_le32(flags);
6408
6409 req->enables =
6410 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6411 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6412 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6413
6414 /* The number of segs is in log2 units, and the first packet is
6415 * not included in these units.
6416 */
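/* For example, with 4K RX pages, an MTU of 1500 (mss 1460) and
 * MAX_SKB_FRAGS of 17, n = 2, nsegs = 32 and segs = ilog2(32) = 5
 * on pre-P5 chips.  These values are illustrative and depend on
 * the kernel configuration.
 */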
6417 if (mss <= BNXT_RX_PAGE_SIZE) {
6418 n = BNXT_RX_PAGE_SIZE / mss;
6419 nsegs = (MAX_SKB_FRAGS - 1) * n;
6420 } else {
6421 n = mss / BNXT_RX_PAGE_SIZE;
6422 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6423 n++;
6424 nsegs = (MAX_SKB_FRAGS - n) / n;
6425 }
6426
6427 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6428 segs = MAX_TPA_SEGS_P5;
6429 max_aggs = bp->max_tpa;
6430 } else {
6431 segs = ilog2(nsegs);
6432 }
6433 req->max_agg_segs = cpu_to_le16(segs);
6434 req->max_aggs = cpu_to_le16(max_aggs);
6435
6436 req->min_agg_len = cpu_to_le32(512);
6437 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6438 }
6439 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6440
6441 return hwrm_req_send(bp, req);
6442 }
6443
6444 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6445 {
6446 struct bnxt_ring_grp_info *grp_info;
6447
6448 grp_info = &bp->grp_info[ring->grp_idx];
6449 return grp_info->cp_fw_ring_id;
6450 }
6451
6452 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6453 {
6454 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6455 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6456 else
6457 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6458 }
6459
6460 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6461 {
6462 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6463 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6464 else
6465 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6466 }
6467
6468 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6469 {
6470 int entries;
6471
6472 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6473 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6474 else
6475 entries = HW_HASH_INDEX_SIZE;
6476
6477 bp->rss_indir_tbl_entries = entries;
6478 bp->rss_indir_tbl =
6479 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6480 if (!bp->rss_indir_tbl)
6481 return -ENOMEM;
6482
6483 return 0;
6484 }
6485
6486 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6487 struct ethtool_rxfh_context *rss_ctx)
6488 {
6489 u16 max_rings, max_entries, pad, i;
6490 u32 *rss_indir_tbl;
6491
6492 if (!bp->rx_nr_rings)
6493 return;
6494
6495 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6496 max_rings = bp->rx_nr_rings - 1;
6497 else
6498 max_rings = bp->rx_nr_rings;
6499
6500 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6501 if (rss_ctx)
6502 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6503 else
6504 rss_indir_tbl = &bp->rss_indir_tbl[0];
6505
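/* ethtool_rxfh_indir_default() spreads the entries round-robin
 * across the RX rings (index % max_rings).
 */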
6506 for (i = 0; i < max_entries; i++)
6507 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6508
6509 pad = bp->rss_indir_tbl_entries - max_entries;
6510 if (pad)
6511 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6512 }
6513
6514 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6515 {
6516 u32 i, tbl_size, max_ring = 0;
6517
6518 if (!bp->rss_indir_tbl)
6519 return 0;
6520
6521 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6522 for (i = 0; i < tbl_size; i++)
6523 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6524 return max_ring;
6525 }
6526
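/* P5+ chips need one RSS context per block of
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings (rounded up); older chips use
 * a single context, or two on Nitro A0.
 */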
6527 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6528 {
6529 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6530 if (!rx_rings)
6531 return 0;
6532 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6533 BNXT_RSS_TABLE_ENTRIES_P5);
6534 }
6535 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6536 return 2;
6537 return 1;
6538 }
6539
6540 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6541 {
6542 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6543 u16 i, j;
6544
6545 /* Fill the RSS indirection table with ring group ids */
6546 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6547 if (!no_rss)
6548 j = bp->rss_indir_tbl[i];
6549 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6550 }
6551 }
6552
6553 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6554 struct bnxt_vnic_info *vnic)
6555 {
6556 __le16 *ring_tbl = vnic->rss_table;
6557 struct bnxt_rx_ring_info *rxr;
6558 u16 tbl_size, i;
6559
6560 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6561
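/* On P5+ chips each indirection table entry is a pair: the RX
 * ring's FW ring ID followed by its completion ring ID, hence the
 * two writes per iteration below.
 */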
6562 for (i = 0; i < tbl_size; i++) {
6563 u16 ring_id, j;
6564
6565 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6566 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6567 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6568 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6569 else
6570 j = bp->rss_indir_tbl[i];
6571 rxr = &bp->rx_ring[j];
6572
6573 ring_id = rxr->rx_ring_struct.fw_ring_id;
6574 *ring_tbl++ = cpu_to_le16(ring_id);
6575 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6576 *ring_tbl++ = cpu_to_le16(ring_id);
6577 }
6578 }
6579
6580 static void
6581 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6582 struct bnxt_vnic_info *vnic)
6583 {
6584 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6585 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6586 if (bp->flags & BNXT_FLAG_CHIP_P7)
6587 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6588 } else {
6589 bnxt_fill_hw_rss_tbl(bp, vnic);
6590 }
6591
6592 if (bp->rss_hash_delta) {
6593 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6594 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6595 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6596 else
6597 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6598 } else {
6599 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6600 }
6601 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6602 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6603 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6604 }
6605
6606 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6607 bool set_rss)
6608 {
6609 struct hwrm_vnic_rss_cfg_input *req;
6610 int rc;
6611
6612 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6613 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6614 return 0;
6615
6616 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6617 if (rc)
6618 return rc;
6619
6620 if (set_rss)
6621 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6622 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6623 return hwrm_req_send(bp, req);
6624 }
6625
6626 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6627 struct bnxt_vnic_info *vnic, bool set_rss)
6628 {
6629 struct hwrm_vnic_rss_cfg_input *req;
6630 dma_addr_t ring_tbl_map;
6631 u32 i, nr_ctxs;
6632 int rc;
6633
6634 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6635 if (rc)
6636 return rc;
6637
6638 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6639 if (!set_rss)
6640 return hwrm_req_send(bp, req);
6641
6642 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6643 ring_tbl_map = vnic->rss_table_dma_addr;
6644 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6645
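/* Program the indirection table one RSS context at a time; each
 * context covers BNXT_RSS_TABLE_SIZE_P5 bytes of the DMA'd ring
 * table.
 */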
6646 hwrm_req_hold(bp, req);
6647 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6648 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6649 req->ring_table_pair_index = i;
6650 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6651 rc = hwrm_req_send(bp, req);
6652 if (rc)
6653 goto exit;
6654 }
6655
6656 exit:
6657 hwrm_req_drop(bp, req);
6658 return rc;
6659 }
6660
6661 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6662 {
6663 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6664 struct hwrm_vnic_rss_qcfg_output *resp;
6665 struct hwrm_vnic_rss_qcfg_input *req;
6666
6667 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6668 return;
6669
6670 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6671 /* All contexts are configured with the same hash_type; context zero always exists */
6672 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6673 resp = hwrm_req_hold(bp, req);
6674 if (!hwrm_req_send(bp, req)) {
6675 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6676 bp->rss_hash_delta = 0;
6677 }
6678 hwrm_req_drop(bp, req);
6679 }
6680
6681 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6682 {
6683 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6684 struct hwrm_vnic_plcmodes_cfg_input *req;
6685 int rc;
6686
6687 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6688 if (rc)
6689 return rc;
6690
6691 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6692 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6693 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6694
6695 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6696 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6697 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6698 req->enables |=
6699 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6700 req->hds_threshold = cpu_to_le16(hds_thresh);
6701 }
6702 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6703 return hwrm_req_send(bp, req);
6704 }
6705
6706 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6707 struct bnxt_vnic_info *vnic,
6708 u16 ctx_idx)
6709 {
6710 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6711
6712 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6713 return;
6714
6715 req->rss_cos_lb_ctx_id =
6716 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6717
6718 hwrm_req_send(bp, req);
6719 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6720 }
6721
6722 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6723 {
6724 int i, j;
6725
6726 for (i = 0; i < bp->nr_vnics; i++) {
6727 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6728
6729 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6730 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6731 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6732 }
6733 }
6734 bp->rsscos_nr_ctxs = 0;
6735 }
6736
6737 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6738 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6739 {
6740 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6741 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6742 int rc;
6743
6744 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6745 if (rc)
6746 return rc;
6747
6748 resp = hwrm_req_hold(bp, req);
6749 rc = hwrm_req_send(bp, req);
6750 if (!rc)
6751 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6752 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6753 hwrm_req_drop(bp, req);
6754
6755 return rc;
6756 }
6757
6758 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6759 {
6760 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6761 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6762 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6763 }
6764
6765 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6766 {
6767 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6768 struct hwrm_vnic_cfg_input *req;
6769 unsigned int ring = 0, grp_idx;
6770 u16 def_vlan = 0;
6771 int rc;
6772
6773 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6774 if (rc)
6775 return rc;
6776
6777 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6778 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6779
6780 req->default_rx_ring_id =
6781 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6782 req->default_cmpl_ring_id =
6783 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6784 req->enables =
6785 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6786 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6787 goto vnic_mru;
6788 }
6789 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6790 /* Only RSS is supported for now; COS & LB are TBD */
6791 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6792 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6793 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6794 VNIC_CFG_REQ_ENABLES_MRU);
6795 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6796 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6797 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6798 VNIC_CFG_REQ_ENABLES_MRU);
6799 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6800 } else {
6801 req->rss_rule = cpu_to_le16(0xffff);
6802 }
6803
6804 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6805 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6806 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6807 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6808 } else {
6809 req->cos_rule = cpu_to_le16(0xffff);
6810 }
6811
6812 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6813 ring = 0;
6814 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6815 ring = vnic->vnic_id - 1;
6816 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6817 ring = bp->rx_nr_rings - 1;
6818
6819 grp_idx = bp->rx_ring[ring].bnapi->index;
6820 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6821 req->lb_rule = cpu_to_le16(0xffff);
6822 vnic_mru:
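/* The MRU accounts for the Ethernet header and one VLAN tag on
 * top of the MTU.
 */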
6823 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
6824 req->mru = cpu_to_le16(vnic->mru);
6825
6826 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6827 #ifdef CONFIG_BNXT_SRIOV
6828 if (BNXT_VF(bp))
6829 def_vlan = bp->vf.vlan;
6830 #endif
6831 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6832 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6833 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6834 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6835
6836 return hwrm_req_send(bp, req);
6837 }
6838
6839 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6840 struct bnxt_vnic_info *vnic)
6841 {
6842 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6843 struct hwrm_vnic_free_input *req;
6844
6845 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6846 return;
6847
6848 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6849
6850 hwrm_req_send(bp, req);
6851 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6852 }
6853 }
6854
6855 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6856 {
6857 u16 i;
6858
6859 for (i = 0; i < bp->nr_vnics; i++)
6860 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6861 }
6862
6863 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6864 unsigned int start_rx_ring_idx,
6865 unsigned int nr_rings)
6866 {
6867 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6868 struct hwrm_vnic_alloc_output *resp;
6869 struct hwrm_vnic_alloc_input *req;
6870 int rc;
6871
6872 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6873 if (rc)
6874 return rc;
6875
6876 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6877 goto vnic_no_ring_grps;
6878
6879 /* map ring groups to this vnic */
6880 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6881 grp_idx = bp->rx_ring[i].bnapi->index;
6882 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6883 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6884 j, nr_rings);
6885 break;
6886 }
6887 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6888 }
6889
6890 vnic_no_ring_grps:
6891 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6892 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6893 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6894 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6895
6896 resp = hwrm_req_hold(bp, req);
6897 rc = hwrm_req_send(bp, req);
6898 if (!rc)
6899 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6900 hwrm_req_drop(bp, req);
6901 return rc;
6902 }
6903
6904 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6905 {
6906 struct hwrm_vnic_qcaps_output *resp;
6907 struct hwrm_vnic_qcaps_input *req;
6908 int rc;
6909
6910 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6911 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6912 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6913 if (bp->hwrm_spec_code < 0x10600)
6914 return 0;
6915
6916 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6917 if (rc)
6918 return rc;
6919
6920 resp = hwrm_req_hold(bp, req);
6921 rc = hwrm_req_send(bp, req);
6922 if (!rc) {
6923 u32 flags = le32_to_cpu(resp->flags);
6924
6925 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6926 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6927 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6928 if (flags &
6929 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6930 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6931
6932 /* Older P5 fw before EXT_HW_STATS support did not set
6933 * VLAN_STRIP_CAP properly.
6934 */
6935 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6936 (BNXT_CHIP_P5(bp) &&
6937 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6938 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6939 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6940 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6941 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6942 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6943 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6944 if (bp->max_tpa_v2) {
6945 if (BNXT_CHIP_P5(bp))
6946 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6947 else
6948 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6949 }
6950 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6951 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6952 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6953 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6954 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6955 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6956 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6957 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6958 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6959 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6960 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6961 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6962 }
6963 hwrm_req_drop(bp, req);
6964 return rc;
6965 }
6966
6967 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6968 {
6969 struct hwrm_ring_grp_alloc_output *resp;
6970 struct hwrm_ring_grp_alloc_input *req;
6971 int rc;
6972 u16 i;
6973
6974 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6975 return 0;
6976
6977 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6978 if (rc)
6979 return rc;
6980
6981 resp = hwrm_req_hold(bp, req);
6982 for (i = 0; i < bp->rx_nr_rings; i++) {
6983 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6984
6985 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6986 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6987 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6988 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6989
6990 rc = hwrm_req_send(bp, req);
6991
6992 if (rc)
6993 break;
6994
6995 bp->grp_info[grp_idx].fw_grp_id =
6996 le32_to_cpu(resp->ring_group_id);
6997 }
6998 hwrm_req_drop(bp, req);
6999 return rc;
7000 }
7001
7002 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7003 {
7004 struct hwrm_ring_grp_free_input *req;
7005 u16 i;
7006
7007 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7008 return;
7009
7010 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7011 return;
7012
7013 hwrm_req_hold(bp, req);
7014 for (i = 0; i < bp->cp_nr_rings; i++) {
7015 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7016 continue;
7017 req->ring_group_id =
7018 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7019
7020 hwrm_req_send(bp, req);
7021 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7022 }
7023 hwrm_req_drop(bp, req);
7024 }
7025
7026 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7027 struct hwrm_ring_alloc_input *req,
7028 struct bnxt_ring_struct *ring)
7029 {
7030 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7031 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7032 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7033
7034 if (ring_type == HWRM_RING_ALLOC_AGG) {
7035 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7036 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7037 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7038 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7039 } else {
7040 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7041 if (NET_IP_ALIGN == 2)
7042 req->flags =
7043 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7044 }
7045 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7046 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7047 req->enables |= cpu_to_le32(enables);
7048 }
7049
7050 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7051 struct bnxt_ring_struct *ring,
7052 u32 ring_type, u32 map_index)
7053 {
7054 struct hwrm_ring_alloc_output *resp;
7055 struct hwrm_ring_alloc_input *req;
7056 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7057 struct bnxt_ring_grp_info *grp_info;
7058 int rc, err = 0;
7059 u16 ring_id;
7060
7061 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7062 if (rc)
7063 goto exit;
7064
7065 req->enables = 0;
7066 if (rmem->nr_pages > 1) {
7067 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7068 /* Page size is in log2 units */
7069 req->page_size = BNXT_PAGE_SHIFT;
7070 req->page_tbl_depth = 1;
7071 } else {
7072 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7073 }
7074 req->fbo = 0;
7075 /* Association of ring index with doorbell index and MSIX number */
7076 req->logical_id = cpu_to_le16(map_index);
7077
7078 switch (ring_type) {
7079 case HWRM_RING_ALLOC_TX: {
7080 struct bnxt_tx_ring_info *txr;
7081 u16 flags = 0;
7082
7083 txr = container_of(ring, struct bnxt_tx_ring_info,
7084 tx_ring_struct);
7085 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7086 /* Association of transmit ring with completion ring */
7087 grp_info = &bp->grp_info[ring->grp_idx];
7088 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7089 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7090 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7091 req->queue_id = cpu_to_le16(ring->queue_id);
7092 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7093 req->cmpl_coal_cnt =
7094 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7095 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7096 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7097 req->flags = cpu_to_le16(flags);
7098 break;
7099 }
7100 case HWRM_RING_ALLOC_RX:
7101 case HWRM_RING_ALLOC_AGG:
7102 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7103 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7104 cpu_to_le32(bp->rx_ring_mask + 1) :
7105 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7106 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7107 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
7108 break;
7109 case HWRM_RING_ALLOC_CMPL:
7110 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7111 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7112 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7113 /* Association of cp ring with nq */
7114 grp_info = &bp->grp_info[map_index];
7115 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7116 req->cq_handle = cpu_to_le64(ring->handle);
7117 req->enables |= cpu_to_le32(
7118 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7119 } else {
7120 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7121 }
7122 break;
7123 case HWRM_RING_ALLOC_NQ:
7124 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7125 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7126 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7127 break;
7128 default:
7129 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7130 ring_type);
7131 return -EINVAL;
7132 }
7133
7134 resp = hwrm_req_hold(bp, req);
7135 rc = hwrm_req_send(bp, req);
7136 err = le16_to_cpu(resp->error_code);
7137 ring_id = le16_to_cpu(resp->ring_id);
7138 hwrm_req_drop(bp, req);
7139
7140 exit:
7141 if (rc || err) {
7142 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7143 ring_type, rc, err);
7144 return -EIO;
7145 }
7146 ring->fw_ring_id = ring_id;
7147 return rc;
7148 }
7149
7150 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7151 {
7152 int rc;
7153
7154 if (BNXT_PF(bp)) {
7155 struct hwrm_func_cfg_input *req;
7156
7157 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7158 if (rc)
7159 return rc;
7160
7161 req->fid = cpu_to_le16(0xffff);
7162 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7163 req->async_event_cr = cpu_to_le16(idx);
7164 return hwrm_req_send(bp, req);
7165 } else {
7166 struct hwrm_func_vf_cfg_input *req;
7167
7168 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7169 if (rc)
7170 return rc;
7171
7172 req->enables =
7173 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7174 req->async_event_cr = cpu_to_le16(idx);
7175 return hwrm_req_send(bp, req);
7176 }
7177 }
7178
7179 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7180 u32 ring_type)
7181 {
7182 switch (ring_type) {
7183 case HWRM_RING_ALLOC_TX:
7184 db->db_ring_mask = bp->tx_ring_mask;
7185 break;
7186 case HWRM_RING_ALLOC_RX:
7187 db->db_ring_mask = bp->rx_ring_mask;
7188 break;
7189 case HWRM_RING_ALLOC_AGG:
7190 db->db_ring_mask = bp->rx_agg_ring_mask;
7191 break;
7192 case HWRM_RING_ALLOC_CMPL:
7193 case HWRM_RING_ALLOC_NQ:
7194 db->db_ring_mask = bp->cp_ring_mask;
7195 break;
7196 }
7197 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7198 db->db_epoch_mask = db->db_ring_mask + 1;
7199 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7200 }
7201 }
7202
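/* Set up the doorbell for a newly allocated ring.  P5_PLUS chips use
 * 64-bit doorbells keyed by ring type and XID at bar1 + db_offset;
 * older chips use 32-bit doorbells at bar1 + map_idx * 0x80.
 */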
7203 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7204 u32 map_idx, u32 xid)
7205 {
7206 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7207 switch (ring_type) {
7208 case HWRM_RING_ALLOC_TX:
7209 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7210 break;
7211 case HWRM_RING_ALLOC_RX:
7212 case HWRM_RING_ALLOC_AGG:
7213 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7214 break;
7215 case HWRM_RING_ALLOC_CMPL:
7216 db->db_key64 = DBR_PATH_L2;
7217 break;
7218 case HWRM_RING_ALLOC_NQ:
7219 db->db_key64 = DBR_PATH_L2;
7220 break;
7221 }
7222 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7223
7224 if (bp->flags & BNXT_FLAG_CHIP_P7)
7225 db->db_key64 |= DBR_VALID;
7226
7227 db->doorbell = bp->bar1 + bp->db_offset;
7228 } else {
7229 db->doorbell = bp->bar1 + map_idx * 0x80;
7230 switch (ring_type) {
7231 case HWRM_RING_ALLOC_TX:
7232 db->db_key32 = DB_KEY_TX;
7233 break;
7234 case HWRM_RING_ALLOC_RX:
7235 case HWRM_RING_ALLOC_AGG:
7236 db->db_key32 = DB_KEY_RX;
7237 break;
7238 case HWRM_RING_ALLOC_CMPL:
7239 db->db_key32 = DB_KEY_CP;
7240 break;
7241 }
7242 }
7243 bnxt_set_db_mask(bp, db, ring_type);
7244 }
7245
7246 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7247 struct bnxt_rx_ring_info *rxr)
7248 {
7249 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7250 struct bnxt_napi *bnapi = rxr->bnapi;
7251 u32 type = HWRM_RING_ALLOC_RX;
7252 u32 map_idx = bnapi->index;
7253 int rc;
7254
7255 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7256 if (rc)
7257 return rc;
7258
7259 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7260 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7261
7262 return 0;
7263 }
7264
7265 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7266 struct bnxt_rx_ring_info *rxr)
7267 {
7268 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7269 u32 type = HWRM_RING_ALLOC_AGG;
7270 u32 grp_idx = ring->grp_idx;
7271 u32 map_idx;
7272 int rc;
7273
7274 map_idx = grp_idx + bp->rx_nr_rings;
7275 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7276 if (rc)
7277 return rc;
7278
7279 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7280 ring->fw_ring_id);
7281 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7282 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7283 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7284
7285 return 0;
7286 }
7287
7288 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7289 struct bnxt_cp_ring_info *cpr)
7290 {
7291 const u32 type = HWRM_RING_ALLOC_CMPL;
7292 struct bnxt_napi *bnapi = cpr->bnapi;
7293 struct bnxt_ring_struct *ring;
7294 u32 map_idx = bnapi->index;
7295 int rc;
7296
7297 ring = &cpr->cp_ring_struct;
7298 ring->handle = BNXT_SET_NQ_HDL(cpr);
7299 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7300 if (rc)
7301 return rc;
7302 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7303 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7304 return 0;
7305 }
7306
7307 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7308 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7309 {
7310 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7311 const u32 type = HWRM_RING_ALLOC_TX;
7312 int rc;
7313
7314 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
7315 if (rc)
7316 return rc;
7317 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7318 return 0;
7319 }
7320
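/* Allocate all firmware rings: NQs (or legacy completion rings) first so
 * the first one can be set as the async event ring, then TX rings with
 * their P5_PLUS completion rings, RX rings, and finally the agg rings.
 */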
7321 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7322 {
7323 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7324 int i, rc = 0;
7325 u32 type;
7326
7327 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7328 type = HWRM_RING_ALLOC_NQ;
7329 else
7330 type = HWRM_RING_ALLOC_CMPL;
7331 for (i = 0; i < bp->cp_nr_rings; i++) {
7332 struct bnxt_napi *bnapi = bp->bnapi[i];
7333 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7334 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7335 u32 map_idx = ring->map_idx;
7336 unsigned int vector;
7337
7338 vector = bp->irq_tbl[map_idx].vector;
7339 disable_irq_nosync(vector);
7340 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7341 if (rc) {
7342 enable_irq(vector);
7343 goto err_out;
7344 }
7345 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7346 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7347 enable_irq(vector);
7348 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7349
7350 if (!i) {
7351 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7352 if (rc)
7353 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7354 }
7355 }
7356
7357 for (i = 0; i < bp->tx_nr_rings; i++) {
7358 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7359
7360 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7361 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7362 if (rc)
7363 goto err_out;
7364 }
7365 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7366 if (rc)
7367 goto err_out;
7368 }
7369
7370 for (i = 0; i < bp->rx_nr_rings; i++) {
7371 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7372
7373 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7374 if (rc)
7375 goto err_out;
7376 /* If we have agg rings, post agg buffers first. */
7377 if (!agg_rings)
7378 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7379 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7380 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7381 if (rc)
7382 goto err_out;
7383 }
7384 }
7385
7386 if (agg_rings) {
7387 for (i = 0; i < bp->rx_nr_rings; i++) {
7388 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7389 if (rc)
7390 goto err_out;
7391 }
7392 }
7393 err_out:
7394 return rc;
7395 }
7396
7397 static void bnxt_cancel_dim(struct bnxt *bp)
7398 {
7399 int i;
7400
7401 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7402 * if NAPI is enabled.
7403 */
7404 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7405 return;
7406
7407 /* Make sure NAPI sees that the VNIC is disabled */
7408 synchronize_net();
7409 for (i = 0; i < bp->rx_nr_rings; i++) {
7410 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7411 struct bnxt_napi *bnapi = rxr->bnapi;
7412
7413 cancel_work_sync(&bnapi->cp_ring.dim.work);
7414 }
7415 }
7416
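/* Send HWRM_RING_FREE for one ring.  Skipped when firmware access is not
 * possible; returns -EIO if firmware reports an error.
 */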
7417 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7418 struct bnxt_ring_struct *ring,
7419 u32 ring_type, int cmpl_ring_id)
7420 {
7421 struct hwrm_ring_free_output *resp;
7422 struct hwrm_ring_free_input *req;
7423 u16 error_code = 0;
7424 int rc;
7425
7426 if (BNXT_NO_FW_ACCESS(bp))
7427 return 0;
7428
7429 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7430 if (rc)
7431 goto exit;
7432
7433 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7434 req->ring_type = ring_type;
7435 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7436
7437 resp = hwrm_req_hold(bp, req);
7438 rc = hwrm_req_send(bp, req);
7439 error_code = le16_to_cpu(resp->error_code);
7440 hwrm_req_drop(bp, req);
7441 exit:
7442 if (rc || error_code) {
7443 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7444 ring_type, rc, error_code);
7445 return -EIO;
7446 }
7447 return 0;
7448 }
7449
7450 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7451 struct bnxt_tx_ring_info *txr,
7452 bool close_path)
7453 {
7454 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7455 u32 cmpl_ring_id;
7456
7457 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7458 return;
7459
7460 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7461 INVALID_HW_RING_ID;
7462 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7463 cmpl_ring_id);
7464 ring->fw_ring_id = INVALID_HW_RING_ID;
7465 }
7466
7467 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7468 struct bnxt_rx_ring_info *rxr,
7469 bool close_path)
7470 {
7471 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7472 u32 grp_idx = rxr->bnapi->index;
7473 u32 cmpl_ring_id;
7474
7475 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7476 return;
7477
7478 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7479 hwrm_ring_free_send_msg(bp, ring,
7480 RING_FREE_REQ_RING_TYPE_RX,
7481 close_path ? cmpl_ring_id :
7482 INVALID_HW_RING_ID);
7483 ring->fw_ring_id = INVALID_HW_RING_ID;
7484 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7485 }
7486
7487 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7488 struct bnxt_rx_ring_info *rxr,
7489 bool close_path)
7490 {
7491 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7492 u32 grp_idx = rxr->bnapi->index;
7493 u32 type, cmpl_ring_id;
7494
7495 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7496 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7497 else
7498 type = RING_FREE_REQ_RING_TYPE_RX;
7499
7500 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7501 return;
7502
7503 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7504 hwrm_ring_free_send_msg(bp, ring, type,
7505 close_path ? cmpl_ring_id :
7506 INVALID_HW_RING_ID);
7507 ring->fw_ring_id = INVALID_HW_RING_ID;
7508 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7509 }
7510
7511 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7512 struct bnxt_cp_ring_info *cpr)
7513 {
7514 struct bnxt_ring_struct *ring;
7515
7516 ring = &cpr->cp_ring_struct;
7517 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7518 return;
7519
7520 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7521 INVALID_HW_RING_ID);
7522 ring->fw_ring_id = INVALID_HW_RING_ID;
7523 }
7524
7525 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7526 {
7527 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7528 int i, size = ring->ring_mem.page_size;
7529
7530 cpr->cp_raw_cons = 0;
7531 cpr->toggle = 0;
7532
7533 for (i = 0; i < bp->cp_nr_pages; i++)
7534 if (cpr->cp_desc_ring[i])
7535 memset(cpr->cp_desc_ring[i], 0, size);
7536 }
7537
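/* Free all firmware rings in dependency order: TX first, then RX and agg
 * rings, and finally the completion/NQ rings after interrupts have been
 * disabled.
 */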
7538 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7539 {
7540 u32 type;
7541 int i;
7542
7543 if (!bp->bnapi)
7544 return;
7545
7546 for (i = 0; i < bp->tx_nr_rings; i++)
7547 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7548
7549 bnxt_cancel_dim(bp);
7550 for (i = 0; i < bp->rx_nr_rings; i++) {
7551 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7552 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7553 }
7554
7555 /* The completion rings are about to be freed. After that the
7556 * IRQ doorbell will not work anymore. So we need to disable
7557 * IRQ here.
7558 */
7559 bnxt_disable_int_sync(bp);
7560
7561 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7562 type = RING_FREE_REQ_RING_TYPE_NQ;
7563 else
7564 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7565 for (i = 0; i < bp->cp_nr_rings; i++) {
7566 struct bnxt_napi *bnapi = bp->bnapi[i];
7567 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7568 struct bnxt_ring_struct *ring;
7569 int j;
7570
7571 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7572 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7573
7574 ring = &cpr->cp_ring_struct;
7575 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7576 hwrm_ring_free_send_msg(bp, ring, type,
7577 INVALID_HW_RING_ID);
7578 ring->fw_ring_id = INVALID_HW_RING_ID;
7579 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7580 }
7581 }
7582 }
7583
7584 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7585 bool shared);
7586 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7587 bool shared);
7588
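/* Read back the resources currently reserved for this function via
 * HWRM_FUNC_QCFG and cache them in bp->hw_resc.  On P5_PLUS chips the
 * RX/TX counts are trimmed to fit the reserved completion rings.
 */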
7589 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7590 {
7591 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7592 struct hwrm_func_qcfg_output *resp;
7593 struct hwrm_func_qcfg_input *req;
7594 int rc;
7595
7596 if (bp->hwrm_spec_code < 0x10601)
7597 return 0;
7598
7599 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7600 if (rc)
7601 return rc;
7602
7603 req->fid = cpu_to_le16(0xffff);
7604 resp = hwrm_req_hold(bp, req);
7605 rc = hwrm_req_send(bp, req);
7606 if (rc) {
7607 hwrm_req_drop(bp, req);
7608 return rc;
7609 }
7610
7611 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7612 if (BNXT_NEW_RM(bp)) {
7613 u16 cp, stats;
7614
7615 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7616 hw_resc->resv_hw_ring_grps =
7617 le32_to_cpu(resp->alloc_hw_ring_grps);
7618 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7619 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7620 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7621 stats = le16_to_cpu(resp->alloc_stat_ctx);
7622 hw_resc->resv_irqs = cp;
7623 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7624 int rx = hw_resc->resv_rx_rings;
7625 int tx = hw_resc->resv_tx_rings;
7626
7627 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7628 rx >>= 1;
7629 if (cp < (rx + tx)) {
7630 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7631 if (rc)
7632 goto get_rings_exit;
7633 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7634 rx <<= 1;
7635 hw_resc->resv_rx_rings = rx;
7636 hw_resc->resv_tx_rings = tx;
7637 }
7638 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7639 hw_resc->resv_hw_ring_grps = rx;
7640 }
7641 hw_resc->resv_cp_rings = cp;
7642 hw_resc->resv_stat_ctxs = stats;
7643 }
7644 get_rings_exit:
7645 hwrm_req_drop(bp, req);
7646 return rc;
7647 }
7648
7649 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7650 {
7651 struct hwrm_func_qcfg_output *resp;
7652 struct hwrm_func_qcfg_input *req;
7653 int rc;
7654
7655 if (bp->hwrm_spec_code < 0x10601)
7656 return 0;
7657
7658 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7659 if (rc)
7660 return rc;
7661
7662 req->fid = cpu_to_le16(fid);
7663 resp = hwrm_req_hold(bp, req);
7664 rc = hwrm_req_send(bp, req);
7665 if (!rc)
7666 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7667
7668 hwrm_req_drop(bp, req);
7669 return rc;
7670 }
7671
7672 static bool bnxt_rfs_supported(struct bnxt *bp);
7673
7674 static struct hwrm_func_cfg_input *
7675 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7676 {
7677 struct hwrm_func_cfg_input *req;
7678 u32 enables = 0;
7679
7680 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7681 return NULL;
7682
7683 req->fid = cpu_to_le16(0xffff);
7684 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7685 req->num_tx_rings = cpu_to_le16(hwr->tx);
7686 if (BNXT_NEW_RM(bp)) {
7687 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7688 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7689 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7690 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7691 enables |= hwr->cp_p5 ?
7692 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7693 } else {
7694 enables |= hwr->cp ?
7695 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7696 enables |= hwr->grp ?
7697 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7698 }
7699 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7700 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7701 0;
7702 req->num_rx_rings = cpu_to_le16(hwr->rx);
7703 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7704 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7705 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7706 req->num_msix = cpu_to_le16(hwr->cp);
7707 } else {
7708 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7709 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7710 }
7711 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7712 req->num_vnics = cpu_to_le16(hwr->vnic);
7713 }
7714 req->enables = cpu_to_le32(enables);
7715 return req;
7716 }
7717
7718 static struct hwrm_func_vf_cfg_input *
7719 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7720 {
7721 struct hwrm_func_vf_cfg_input *req;
7722 u32 enables = 0;
7723
7724 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7725 return NULL;
7726
7727 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7728 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7729 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7730 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7731 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7732 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7733 enables |= hwr->cp_p5 ?
7734 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7735 } else {
7736 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7737 enables |= hwr->grp ?
7738 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7739 }
7740 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7741 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7742
7743 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7744 req->num_tx_rings = cpu_to_le16(hwr->tx);
7745 req->num_rx_rings = cpu_to_le16(hwr->rx);
7746 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7747 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7748 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7749 } else {
7750 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7751 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7752 }
7753 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7754 req->num_vnics = cpu_to_le16(hwr->vnic);
7755
7756 req->enables = cpu_to_le32(enables);
7757 return req;
7758 }
7759
7760 static int
7761 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7762 {
7763 struct hwrm_func_cfg_input *req;
7764 int rc;
7765
7766 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7767 if (!req)
7768 return -ENOMEM;
7769
7770 if (!req->enables) {
7771 hwrm_req_drop(bp, req);
7772 return 0;
7773 }
7774
7775 rc = hwrm_req_send(bp, req);
7776 if (rc)
7777 return rc;
7778
7779 if (bp->hwrm_spec_code < 0x10601)
7780 bp->hw_resc.resv_tx_rings = hwr->tx;
7781
7782 return bnxt_hwrm_get_rings(bp);
7783 }
7784
7785 static int
7786 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7787 {
7788 struct hwrm_func_vf_cfg_input *req;
7789 int rc;
7790
7791 if (!BNXT_NEW_RM(bp)) {
7792 bp->hw_resc.resv_tx_rings = hwr->tx;
7793 return 0;
7794 }
7795
7796 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7797 if (!req)
7798 return -ENOMEM;
7799
7800 rc = hwrm_req_send(bp, req);
7801 if (rc)
7802 return rc;
7803
7804 return bnxt_hwrm_get_rings(bp);
7805 }
7806
7807 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7808 {
7809 if (BNXT_PF(bp))
7810 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7811 else
7812 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7813 }
7814
7815 int bnxt_nq_rings_in_use(struct bnxt *bp)
7816 {
7817 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7818 }
7819
7820 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7821 {
7822 int cp;
7823
7824 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7825 return bnxt_nq_rings_in_use(bp);
7826
7827 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7828 return cp;
7829 }
7830
7831 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7832 {
7833 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7834 }
7835
7836 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7837 {
7838 if (!hwr->grp)
7839 return 0;
7840 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7841 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7842
7843 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7844 rss_ctx *= hwr->vnic;
7845 return rss_ctx;
7846 }
7847 if (BNXT_VF(bp))
7848 return BNXT_VF_MAX_RSS_CTX;
7849 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7850 return hwr->grp + 1;
7851 return 1;
7852 }
7853
7854 /* Check if a default RSS map needs to be set up. This function is only
7855 * used on older firmware that does not require reserving RX rings.
7856 */
7857 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7858 {
7859 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7860
7861 /* The RSS map is valid for RX rings set to resv_rx_rings */
7862 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7863 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7864 if (!netif_is_rxfh_configured(bp->dev))
7865 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7866 }
7867 }
7868
7869 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7870 {
7871 if (bp->flags & BNXT_FLAG_RFS) {
7872 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7873 return 2 + bp->num_rss_ctx;
7874 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7875 return rx_rings + 1;
7876 }
7877 return 1;
7878 }
7879
7880 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7881 {
7882 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7883 int cp = bnxt_cp_rings_in_use(bp);
7884 int nq = bnxt_nq_rings_in_use(bp);
7885 int rx = bp->rx_nr_rings, stat;
7886 int vnic, grp = rx;
7887
7888 /* Old firmware does not need RX ring reservations but we still
7889 * need to set up a default RSS map when needed. With new firmware
7890 * we go through RX ring reservations first and then set up the
7891 * RSS map for the successfully reserved RX rings when needed.
7892 */
7893 if (!BNXT_NEW_RM(bp))
7894 bnxt_check_rss_tbl_no_rmgr(bp);
7895
7896 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7897 bp->hwrm_spec_code >= 0x10601)
7898 return true;
7899
7900 if (!BNXT_NEW_RM(bp))
7901 return false;
7902
7903 vnic = bnxt_get_total_vnics(bp, rx);
7904
7905 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7906 rx <<= 1;
7907 stat = bnxt_get_func_stat_ctxs(bp);
7908 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7909 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7910 (hw_resc->resv_hw_ring_grps != grp &&
7911 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7912 return true;
7913 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7914 hw_resc->resv_irqs != nq)
7915 return true;
7916 return false;
7917 }
7918
7919 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7920 {
7921 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7922
7923 hwr->tx = hw_resc->resv_tx_rings;
7924 if (BNXT_NEW_RM(bp)) {
7925 hwr->rx = hw_resc->resv_rx_rings;
7926 hwr->cp = hw_resc->resv_irqs;
7927 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7928 hwr->cp_p5 = hw_resc->resv_cp_rings;
7929 hwr->grp = hw_resc->resv_hw_ring_grps;
7930 hwr->vnic = hw_resc->resv_vnics;
7931 hwr->stat = hw_resc->resv_stat_ctxs;
7932 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7933 }
7934 }
7935
7936 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7937 {
7938 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7939 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7940 }
7941
7942 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7943
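/* Reserve rings and related resources with firmware, then trim the
 * driver's ring counts to what was actually granted.  If not enough RX
 * rings remain to support aggregation rings, fall back to no agg rings
 * and disable LRO.
 */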
7944 static int __bnxt_reserve_rings(struct bnxt *bp)
7945 {
7946 struct bnxt_hw_rings hwr = {0};
7947 int rx_rings, old_rx_rings, rc;
7948 int cp = bp->cp_nr_rings;
7949 int ulp_msix = 0;
7950 bool sh = false;
7951 int tx_cp;
7952
7953 if (!bnxt_need_reserve_rings(bp))
7954 return 0;
7955
7956 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7957 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7958 if (!ulp_msix)
7959 bnxt_set_ulp_stat_ctxs(bp, 0);
7960
7961 if (ulp_msix > bp->ulp_num_msix_want)
7962 ulp_msix = bp->ulp_num_msix_want;
7963 hwr.cp = cp + ulp_msix;
7964 } else {
7965 hwr.cp = bnxt_nq_rings_in_use(bp);
7966 }
7967
7968 hwr.tx = bp->tx_nr_rings;
7969 hwr.rx = bp->rx_nr_rings;
7970 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7971 sh = true;
7972 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7973 hwr.cp_p5 = hwr.rx + hwr.tx;
7974
7975 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7976
7977 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7978 hwr.rx <<= 1;
7979 hwr.grp = bp->rx_nr_rings;
7980 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7981 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7982 old_rx_rings = bp->hw_resc.resv_rx_rings;
7983
7984 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7985 if (rc)
7986 return rc;
7987
7988 bnxt_copy_reserved_rings(bp, &hwr);
7989
7990 rx_rings = hwr.rx;
7991 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7992 if (hwr.rx >= 2) {
7993 rx_rings = hwr.rx >> 1;
7994 } else {
7995 if (netif_running(bp->dev))
7996 return -ENOMEM;
7997
7998 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7999 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8000 bp->dev->hw_features &= ~NETIF_F_LRO;
8001 bp->dev->features &= ~NETIF_F_LRO;
8002 bnxt_set_ring_params(bp);
8003 }
8004 }
8005 rx_rings = min_t(int, rx_rings, hwr.grp);
8006 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8007 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8008 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8009 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8010 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8011 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8012 hwr.rx = rx_rings << 1;
8013 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8014 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8015 bp->tx_nr_rings = hwr.tx;
8016
8017 /* If we cannot reserve all the RX rings, reset the RSS map only
8018 * if absolutely necessary
8019 */
8020 if (rx_rings != bp->rx_nr_rings) {
8021 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8022 rx_rings, bp->rx_nr_rings);
8023 if (netif_is_rxfh_configured(bp->dev) &&
8024 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8025 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8026 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8027 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8028 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8029 }
8030 }
8031 bp->rx_nr_rings = rx_rings;
8032 bp->cp_nr_rings = hwr.cp;
8033
8034 if (!bnxt_rings_ok(bp, &hwr))
8035 return -ENOMEM;
8036
8037 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8038 !netif_is_rxfh_configured(bp->dev))
8039 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8040
8041 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8042 int resv_msix, resv_ctx, ulp_ctxs;
8043 struct bnxt_hw_resc *hw_resc;
8044
8045 hw_resc = &bp->hw_resc;
8046 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8047 ulp_msix = min_t(int, resv_msix, ulp_msix);
8048 bnxt_set_ulp_msix_num(bp, ulp_msix);
8049 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8050 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8051 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8052 }
8053
8054 return rc;
8055 }
8056
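/* The ring check functions below use the *_ASSETS_TEST flags, which ask
 * firmware whether the requested resources are available without
 * actually reserving them.
 */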
8057 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8058 {
8059 struct hwrm_func_vf_cfg_input *req;
8060 u32 flags;
8061
8062 if (!BNXT_NEW_RM(bp))
8063 return 0;
8064
8065 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8066 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8067 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8068 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8069 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8070 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8071 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8072 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8073 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8074
8075 req->flags = cpu_to_le32(flags);
8076 return hwrm_req_send_silent(bp, req);
8077 }
8078
8079 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8080 {
8081 struct hwrm_func_cfg_input *req;
8082 u32 flags;
8083
8084 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8085 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8086 if (BNXT_NEW_RM(bp)) {
8087 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8088 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8089 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8090 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8091 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8092 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8093 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8094 else
8095 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8096 }
8097
8098 req->flags = cpu_to_le32(flags);
8099 return hwrm_req_send_silent(bp, req);
8100 }
8101
8102 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8103 {
8104 if (bp->hwrm_spec_code < 0x10801)
8105 return 0;
8106
8107 if (BNXT_PF(bp))
8108 return bnxt_hwrm_check_pf_rings(bp, hwr);
8109
8110 return bnxt_hwrm_check_vf_rings(bp, hwr);
8111 }
8112
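/* Query the interrupt coalescing limits supported by firmware.  Falls
 * back to conservative legacy defaults when HWRM_RING_AGGINT_QCAPS is
 * not available (spec older than 1.9.2).
 */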
8113 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8114 {
8115 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8116 struct hwrm_ring_aggint_qcaps_output *resp;
8117 struct hwrm_ring_aggint_qcaps_input *req;
8118 int rc;
8119
8120 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8121 coal_cap->num_cmpl_dma_aggr_max = 63;
8122 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8123 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8124 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8125 coal_cap->int_lat_tmr_min_max = 65535;
8126 coal_cap->int_lat_tmr_max_max = 65535;
8127 coal_cap->num_cmpl_aggr_int_max = 65535;
8128 coal_cap->timer_units = 80;
8129
8130 if (bp->hwrm_spec_code < 0x10902)
8131 return;
8132
8133 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8134 return;
8135
8136 resp = hwrm_req_hold(bp, req);
8137 rc = hwrm_req_send_silent(bp, req);
8138 if (!rc) {
8139 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8140 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8141 coal_cap->num_cmpl_dma_aggr_max =
8142 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8143 coal_cap->num_cmpl_dma_aggr_during_int_max =
8144 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8145 coal_cap->cmpl_aggr_dma_tmr_max =
8146 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8147 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8148 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8149 coal_cap->int_lat_tmr_min_max =
8150 le16_to_cpu(resp->int_lat_tmr_min_max);
8151 coal_cap->int_lat_tmr_max_max =
8152 le16_to_cpu(resp->int_lat_tmr_max_max);
8153 coal_cap->num_cmpl_aggr_int_max =
8154 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8155 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8156 }
8157 hwrm_req_drop(bp, req);
8158 }
8159
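/* Convert microseconds to coalescing timer ticks.  timer_units is the
 * tick period reported by firmware, in nanoseconds (default 80).
 */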
8160 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8161 {
8162 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8163
8164 return usec * 1000 / coal_cap->timer_units;
8165 }
8166
8167 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8168 struct bnxt_coal *hw_coal,
8169 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8170 {
8171 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8172 u16 val, tmr, max, flags = hw_coal->flags;
8173 u32 cmpl_params = coal_cap->cmpl_params;
8174
8175 max = hw_coal->bufs_per_record * 128;
8176 if (hw_coal->budget)
8177 max = hw_coal->bufs_per_record * hw_coal->budget;
8178 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8179
8180 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8181 req->num_cmpl_aggr_int = cpu_to_le16(val);
8182
8183 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8184 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8185
8186 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8187 coal_cap->num_cmpl_dma_aggr_during_int_max);
8188 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8189
8190 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8191 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8192 req->int_lat_tmr_max = cpu_to_le16(tmr);
8193
8194 /* min timer set to 1/2 of interrupt timer */
8195 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8196 val = tmr / 2;
8197 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8198 req->int_lat_tmr_min = cpu_to_le16(val);
8199 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8200 }
8201
8202 /* buf timer set to 1/4 of interrupt timer */
8203 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8204 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8205
8206 if (cmpl_params &
8207 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8208 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8209 val = clamp_t(u16, tmr, 1,
8210 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8211 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8212 req->enables |=
8213 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8214 }
8215
8216 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8217 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8218 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8219 req->flags = cpu_to_le16(flags);
8220 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8221 }
8222
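/* Program the minimum interrupt latency timer of an NQ.  Only done when
 * firmware reports support for NQ timer parameters.
 */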
8223 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8224 struct bnxt_coal *hw_coal)
8225 {
8226 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8228 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8229 u32 nq_params = coal_cap->nq_params;
8230 u16 tmr;
8231 int rc;
8232
8233 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8234 return 0;
8235
8236 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8237 if (rc)
8238 return rc;
8239
8240 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8241 req->flags =
8242 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8243
8244 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8245 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8246 req->int_lat_tmr_min = cpu_to_le16(tmr);
8247 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8248 return hwrm_req_send(bp, req);
8249 }
8250
8251 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8252 {
8253 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8254 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8255 struct bnxt_coal coal;
8256 int rc;
8257
8258 /* Tick values in micro seconds.
8259 * 1 coal_buf x bufs_per_record = 1 completion record.
8260 */
8261 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8262
8263 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8264 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8265
8266 if (!bnapi->rx_ring)
8267 return -ENODEV;
8268
8269 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8270 if (rc)
8271 return rc;
8272
8273 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8274
8275 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8276
8277 return hwrm_req_send(bp, req_rx);
8278 }
8279
8280 static int
8281 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8282 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8283 {
8284 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8285
8286 req->ring_id = cpu_to_le16(ring_id);
8287 return hwrm_req_send(bp, req);
8288 }
8289
8290 static int
8291 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8292 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8293 {
8294 struct bnxt_tx_ring_info *txr;
8295 int i, rc;
8296
8297 bnxt_for_each_napi_tx(i, bnapi, txr) {
8298 u16 ring_id;
8299
8300 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8301 req->ring_id = cpu_to_le16(ring_id);
8302 rc = hwrm_req_send(bp, req);
8303 if (rc)
8304 return rc;
8305 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8306 return 0;
8307 }
8308 return 0;
8309 }
8310
8311 int bnxt_hwrm_set_coal(struct bnxt *bp)
8312 {
8313 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8314 int i, rc;
8315
8316 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8317 if (rc)
8318 return rc;
8319
8320 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8321 if (rc) {
8322 hwrm_req_drop(bp, req_rx);
8323 return rc;
8324 }
8325
8326 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8327 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8328
8329 hwrm_req_hold(bp, req_rx);
8330 hwrm_req_hold(bp, req_tx);
8331 for (i = 0; i < bp->cp_nr_rings; i++) {
8332 struct bnxt_napi *bnapi = bp->bnapi[i];
8333 struct bnxt_coal *hw_coal;
8334
8335 if (!bnapi->rx_ring)
8336 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8337 else
8338 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8339 if (rc)
8340 break;
8341
8342 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8343 continue;
8344
8345 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8346 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8347 if (rc)
8348 break;
8349 }
8350 if (bnapi->rx_ring)
8351 hw_coal = &bp->rx_coal;
8352 else
8353 hw_coal = &bp->tx_coal;
8354 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8355 }
8356 hwrm_req_drop(bp, req_rx);
8357 hwrm_req_drop(bp, req_tx);
8358 return rc;
8359 }
8360
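/* Free the per-completion-ring statistics contexts.  On older firmware
 * (major version <= 20), clear each context with HWRM_STAT_CTX_CLR_STATS
 * before freeing it.
 */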
8361 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8362 {
8363 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8364 struct hwrm_stat_ctx_free_input *req;
8365 int i;
8366
8367 if (!bp->bnapi)
8368 return;
8369
8370 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8371 return;
8372
8373 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8374 return;
8375 if (BNXT_FW_MAJ(bp) <= 20) {
8376 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8377 hwrm_req_drop(bp, req);
8378 return;
8379 }
8380 hwrm_req_hold(bp, req0);
8381 }
8382 hwrm_req_hold(bp, req);
8383 for (i = 0; i < bp->cp_nr_rings; i++) {
8384 struct bnxt_napi *bnapi = bp->bnapi[i];
8385 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8386
8387 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8388 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8389 if (req0) {
8390 req0->stat_ctx_id = req->stat_ctx_id;
8391 hwrm_req_send(bp, req0);
8392 }
8393 hwrm_req_send(bp, req);
8394
8395 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8396 }
8397 }
8398 hwrm_req_drop(bp, req);
8399 if (req0)
8400 hwrm_req_drop(bp, req0);
8401 }
8402
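/* Allocate one statistics context per completion ring and record the
 * firmware ID in both the completion ring and the ring group info.
 */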
8403 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8404 {
8405 struct hwrm_stat_ctx_alloc_output *resp;
8406 struct hwrm_stat_ctx_alloc_input *req;
8407 int rc, i;
8408
8409 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8410 return 0;
8411
8412 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8413 if (rc)
8414 return rc;
8415
8416 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8417 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8418
8419 resp = hwrm_req_hold(bp, req);
8420 for (i = 0; i < bp->cp_nr_rings; i++) {
8421 struct bnxt_napi *bnapi = bp->bnapi[i];
8422 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8423
8424 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8425
8426 rc = hwrm_req_send(bp, req);
8427 if (rc)
8428 break;
8429
8430 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8431
8432 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8433 }
8434 hwrm_req_drop(bp, req);
8435 return rc;
8436 }
8437
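/* Query the function configuration: VF VLAN and trust state, LLDP/DCBX
 * agent capability, bridge (EVB) mode, maximum MTU, and the doorbell
 * BAR layout.
 */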
8438 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8439 {
8440 struct hwrm_func_qcfg_output *resp;
8441 struct hwrm_func_qcfg_input *req;
8442 u16 flags;
8443 int rc;
8444
8445 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8446 if (rc)
8447 return rc;
8448
8449 req->fid = cpu_to_le16(0xffff);
8450 resp = hwrm_req_hold(bp, req);
8451 rc = hwrm_req_send(bp, req);
8452 if (rc)
8453 goto func_qcfg_exit;
8454
8455 flags = le16_to_cpu(resp->flags);
8456 #ifdef CONFIG_BNXT_SRIOV
8457 if (BNXT_VF(bp)) {
8458 struct bnxt_vf_info *vf = &bp->vf;
8459
8460 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8461 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8462 vf->flags |= BNXT_VF_TRUST;
8463 else
8464 vf->flags &= ~BNXT_VF_TRUST;
8465 } else {
8466 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8467 }
8468 #endif
8469 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8470 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8471 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8472 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8473 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8474 }
8475 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8476 bp->flags |= BNXT_FLAG_MULTI_HOST;
8477
8478 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8479 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8480
8481 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8482 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8483
8484 switch (resp->port_partition_type) {
8485 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8486 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8487 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8488 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8489 bp->port_partition_type = resp->port_partition_type;
8490 break;
8491 }
8492 if (bp->hwrm_spec_code < 0x10707 ||
8493 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8494 bp->br_mode = BRIDGE_MODE_VEB;
8495 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8496 bp->br_mode = BRIDGE_MODE_VEPA;
8497 else
8498 bp->br_mode = BRIDGE_MODE_UNDEF;
8499
8500 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8501 if (!bp->max_mtu)
8502 bp->max_mtu = BNXT_MAX_MTU;
8503
8504 if (bp->db_size)
8505 goto func_qcfg_exit;
8506
8507 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8508 if (BNXT_CHIP_P5(bp)) {
8509 if (BNXT_PF(bp))
8510 bp->db_offset = DB_PF_OFFSET_P5;
8511 else
8512 bp->db_offset = DB_VF_OFFSET_P5;
8513 }
8514 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8515 1024);
8516 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8517 bp->db_size <= bp->db_offset)
8518 bp->db_size = pci_resource_len(bp->pdev, 2);
8519
8520 func_qcfg_exit:
8521 hwrm_req_drop(bp, req);
8522 return rc;
8523 }
8524
8525 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8526 u8 init_val, u8 init_offset,
8527 bool init_mask_set)
8528 {
8529 ctxm->init_value = init_val;
8530 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8531 if (init_mask_set)
8532 ctxm->init_offset = init_offset * 4;
8533 else
8534 ctxm->init_value = 0;
8535 }
8536
8537 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8538 {
8539 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8540 u16 type;
8541
8542 for (type = 0; type < ctx_max; type++) {
8543 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8544 int n = 1;
8545
8546 if (!ctxm->max_entries || ctxm->pg_info)
8547 continue;
8548
8549 if (ctxm->instance_bmap)
8550 n = hweight32(ctxm->instance_bmap);
8551 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8552 if (!ctxm->pg_info)
8553 return -ENOMEM;
8554 }
8555 return 0;
8556 }
8557
8558 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8559 struct bnxt_ctx_mem_type *ctxm, bool force);
8560
8561 #define BNXT_CTX_INIT_VALID(flags) \
8562 (!!((flags) & \
8563 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8564
8565 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8566 {
8567 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8568 struct hwrm_func_backing_store_qcaps_v2_input *req;
8569 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8570 u16 type;
8571 int rc;
8572
8573 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8574 if (rc)
8575 return rc;
8576
8577 if (!ctx) {
8578 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8579 if (!ctx)
8580 return -ENOMEM;
8581 bp->ctx = ctx;
8582 }
8583
8584 resp = hwrm_req_hold(bp, req);
8585
8586 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8587 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8588 u8 init_val, init_off, i;
8589 u32 max_entries;
8590 u16 entry_size;
8591 __le32 *p;
8592 u32 flags;
8593
8594 req->type = cpu_to_le16(type);
8595 rc = hwrm_req_send(bp, req);
8596 if (rc)
8597 goto ctx_done;
8598 flags = le32_to_cpu(resp->flags);
8599 type = le16_to_cpu(resp->next_valid_type);
8600 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8601 bnxt_free_one_ctx_mem(bp, ctxm, true);
8602 continue;
8603 }
8604 entry_size = le16_to_cpu(resp->entry_size);
8605 max_entries = le32_to_cpu(resp->max_num_entries);
8606 if (ctxm->mem_valid) {
8607 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8608 ctxm->entry_size != entry_size ||
8609 ctxm->max_entries != max_entries)
8610 bnxt_free_one_ctx_mem(bp, ctxm, true);
8611 else
8612 continue;
8613 }
8614 ctxm->type = le16_to_cpu(resp->type);
8615 ctxm->entry_size = entry_size;
8616 ctxm->flags = flags;
8617 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8618 ctxm->entry_multiple = resp->entry_multiple;
8619 ctxm->max_entries = max_entries;
8620 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8621 init_val = resp->ctx_init_value;
8622 init_off = resp->ctx_init_offset;
8623 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8624 BNXT_CTX_INIT_VALID(flags));
8625 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8626 BNXT_MAX_SPLIT_ENTRY);
8627 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8628 i++, p++)
8629 ctxm->split[i] = le32_to_cpu(*p);
8630 }
8631 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8632
8633 ctx_done:
8634 hwrm_req_drop(bp, req);
8635 return rc;
8636 }
8637
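/* Query how much host backing store (context) memory firmware needs for
 * each context type (QP, SRQ, CQ, VNIC, stats, TQM, MRAV, TIM).  Newer
 * firmware is handled by the V2 query above.
 */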
8638 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8639 {
8640 struct hwrm_func_backing_store_qcaps_output *resp;
8641 struct hwrm_func_backing_store_qcaps_input *req;
8642 int rc;
8643
8644 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8645 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8646 return 0;
8647
8648 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8649 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8650
8651 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8652 if (rc)
8653 return rc;
8654
8655 resp = hwrm_req_hold(bp, req);
8656 rc = hwrm_req_send_silent(bp, req);
8657 if (!rc) {
8658 struct bnxt_ctx_mem_type *ctxm;
8659 struct bnxt_ctx_mem_info *ctx;
8660 u8 init_val, init_idx = 0;
8661 u16 init_mask;
8662
8663 ctx = bp->ctx;
8664 if (!ctx) {
8665 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8666 if (!ctx) {
8667 rc = -ENOMEM;
8668 goto ctx_err;
8669 }
8670 bp->ctx = ctx;
8671 }
8672 init_val = resp->ctx_kind_initializer;
8673 init_mask = le16_to_cpu(resp->ctx_init_mask);
8674
8675 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8676 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8677 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8678 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8679 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8680 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8681 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8682 (init_mask & (1 << init_idx++)) != 0);
8683
8684 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8685 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8686 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8687 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8688 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8689 (init_mask & (1 << init_idx++)) != 0);
8690
8691 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8692 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8693 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8694 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8695 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8696 (init_mask & (1 << init_idx++)) != 0);
8697
8698 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8699 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8700 ctxm->max_entries = ctxm->vnic_entries +
8701 le16_to_cpu(resp->vnic_max_ring_table_entries);
8702 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8703 bnxt_init_ctx_initializer(ctxm, init_val,
8704 resp->vnic_init_offset,
8705 (init_mask & (1 << init_idx++)) != 0);
8706
8707 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8708 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8709 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8710 bnxt_init_ctx_initializer(ctxm, init_val,
8711 resp->stat_init_offset,
8712 (init_mask & (1 << init_idx++)) != 0);
8713
8714 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8715 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8716 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8717 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8718 ctxm->entry_multiple = resp->tqm_entries_multiple;
8719 if (!ctxm->entry_multiple)
8720 ctxm->entry_multiple = 1;
8721
8722 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8723
8724 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8725 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8726 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8727 ctxm->mrav_num_entries_units =
8728 le16_to_cpu(resp->mrav_num_entries_units);
8729 bnxt_init_ctx_initializer(ctxm, init_val,
8730 resp->mrav_init_offset,
8731 (init_mask & (1 << init_idx++)) != 0);
8732
8733 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8734 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8735 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8736
8737 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8738 if (!ctx->tqm_fp_rings_count)
8739 ctx->tqm_fp_rings_count = bp->max_q;
8740 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8741 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8742
8743 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8744 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8745 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8746
8747 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8748 } else {
8749 rc = 0;
8750 }
8751 ctx_err:
8752 hwrm_req_drop(bp, req);
8753 return rc;
8754 }
8755
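/* Encode the page size, page table depth and page directory address of
 * a ring memory block into the request's attribute and directory fields.
 */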
8756 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8757 __le64 *pg_dir)
8758 {
8759 if (!rmem->nr_pages)
8760 return;
8761
8762 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8763 if (rmem->depth >= 1) {
8764 if (rmem->depth == 2)
8765 *pg_attr |= 2;
8766 else
8767 *pg_attr |= 1;
8768 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8769 } else {
8770 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8771 }
8772 }
8773
8774 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8775 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8776 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8777 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8778 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8779 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8780
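/* Configure firmware backing store memory with the legacy (pre-V2)
 * HWRM_FUNC_BACKING_STORE_CFG request.  Only the context types selected
 * in the enables bitmask are described to the firmware.
 */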
8781 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8782 {
8783 struct hwrm_func_backing_store_cfg_input *req;
8784 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8785 struct bnxt_ctx_pg_info *ctx_pg;
8786 struct bnxt_ctx_mem_type *ctxm;
8787 void **__req = (void **)&req;
8788 u32 req_len = sizeof(*req);
8789 __le32 *num_entries;
8790 __le64 *pg_dir;
8791 u32 flags = 0;
8792 u8 *pg_attr;
8793 u32 ena;
8794 int rc;
8795 int i;
8796
8797 if (!ctx)
8798 return 0;
8799
8800 if (req_len > bp->hwrm_max_ext_req_len)
8801 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8802 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8803 if (rc)
8804 return rc;
8805
8806 req->enables = cpu_to_le32(enables);
8807 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8808 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8809 ctx_pg = ctxm->pg_info;
8810 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8811 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8812 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8813 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8814 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8815 &req->qpc_pg_size_qpc_lvl,
8816 &req->qpc_page_dir);
8817
8818 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8819 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8820 }
8821 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8822 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8823 ctx_pg = ctxm->pg_info;
8824 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8825 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8826 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8827 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8828 &req->srq_pg_size_srq_lvl,
8829 &req->srq_page_dir);
8830 }
8831 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8832 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8833 ctx_pg = ctxm->pg_info;
8834 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8835 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8836 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8837 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8838 &req->cq_pg_size_cq_lvl,
8839 &req->cq_page_dir);
8840 }
8841 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8842 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8843 ctx_pg = ctxm->pg_info;
8844 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8845 req->vnic_num_ring_table_entries =
8846 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8847 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8848 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8849 &req->vnic_pg_size_vnic_lvl,
8850 &req->vnic_page_dir);
8851 }
8852 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8853 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8854 ctx_pg = ctxm->pg_info;
8855 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8856 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8857 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8858 &req->stat_pg_size_stat_lvl,
8859 &req->stat_page_dir);
8860 }
8861 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8862 u32 units;
8863
8864 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8865 ctx_pg = ctxm->pg_info;
8866 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8867 units = ctxm->mrav_num_entries_units;
8868 if (units) {
8869 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8870 u32 entries;
8871
8872 num_mr = ctx_pg->entries - num_ah;
8873 entries = ((num_mr / units) << 16) | (num_ah / units);
8874 req->mrav_num_entries = cpu_to_le32(entries);
8875 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8876 }
8877 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8878 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8879 &req->mrav_pg_size_mrav_lvl,
8880 &req->mrav_page_dir);
8881 }
8882 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8883 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8884 ctx_pg = ctxm->pg_info;
8885 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8886 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8887 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8888 &req->tim_pg_size_tim_lvl,
8889 &req->tim_page_dir);
8890 }
8891 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8892 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8893 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8894 pg_dir = &req->tqm_sp_page_dir,
8895 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8896 ctx_pg = ctxm->pg_info;
8897 i < BNXT_MAX_TQM_RINGS;
8898 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8899 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8900 if (!(enables & ena))
8901 continue;
8902
8903 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8904 *num_entries = cpu_to_le32(ctx_pg->entries);
8905 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8906 }
8907 req->flags = cpu_to_le32(flags);
8908 return hwrm_req_send(bp, req);
8909 }
8910
8911 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8912 struct bnxt_ctx_pg_info *ctx_pg)
8913 {
8914 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8915
8916 rmem->page_size = BNXT_PAGE_SIZE;
8917 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8918 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8919 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8920 if (rmem->depth >= 1)
8921 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8922 return bnxt_alloc_ring(bp, rmem);
8923 }
8924
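/* Allocate host pages for one context memory instance.  Small regions use
 * a flat ring (depth 0 or 1); regions needing more than MAX_CTX_PAGES
 * pages, or callers requesting a deeper table, get a two-level page table
 * with one bnxt_ctx_pg_info per second-level table.
 */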
8925 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8926 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8927 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8928 {
8929 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8930 int rc;
8931
8932 if (!mem_size)
8933 return -EINVAL;
8934
8935 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8936 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8937 ctx_pg->nr_pages = 0;
8938 return -EINVAL;
8939 }
8940 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8941 int nr_tbls, i;
8942
8943 rmem->depth = 2;
8944 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8945 GFP_KERNEL);
8946 if (!ctx_pg->ctx_pg_tbl)
8947 return -ENOMEM;
8948 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8949 rmem->nr_pages = nr_tbls;
8950 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8951 if (rc)
8952 return rc;
8953 for (i = 0; i < nr_tbls; i++) {
8954 struct bnxt_ctx_pg_info *pg_tbl;
8955
8956 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8957 if (!pg_tbl)
8958 return -ENOMEM;
8959 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8960 rmem = &pg_tbl->ring_mem;
8961 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8962 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8963 rmem->depth = 1;
8964 rmem->nr_pages = MAX_CTX_PAGES;
8965 rmem->ctx_mem = ctxm;
8966 if (i == (nr_tbls - 1)) {
8967 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8968
8969 if (rem)
8970 rmem->nr_pages = rem;
8971 }
8972 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8973 if (rc)
8974 break;
8975 }
8976 } else {
8977 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8978 if (rmem->nr_pages > 1 || depth)
8979 rmem->depth = 1;
8980 rmem->ctx_mem = ctxm;
8981 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8982 }
8983 return rc;
8984 }
8985
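/* Copy the context memory between head and tail into buf at the given
 * offset, walking the second-level page tables when the ring uses depth 2
 * and wrapping around at the end of the region.  Returns the number of
 * bytes copied.
 */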
8986 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
8987 struct bnxt_ctx_pg_info *ctx_pg,
8988 void *buf, size_t offset, size_t head,
8989 size_t tail)
8990 {
8991 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8992 size_t nr_pages = ctx_pg->nr_pages;
8993 int page_size = rmem->page_size;
8994 size_t len = 0, total_len = 0;
8995 u16 depth = rmem->depth;
8996
8997 tail %= nr_pages * page_size;
8998 do {
8999 if (depth > 1) {
9000 int i = head / (page_size * MAX_CTX_PAGES);
9001 struct bnxt_ctx_pg_info *pg_tbl;
9002
9003 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9004 rmem = &pg_tbl->ring_mem;
9005 }
9006 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9007 head += len;
9008 offset += len;
9009 total_len += len;
9010 if (head >= nr_pages * page_size)
9011 head = 0;
9012 } while (head != tail);
9013 return total_len;
9014 }
9015
9016 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9017 struct bnxt_ctx_pg_info *ctx_pg)
9018 {
9019 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9020
9021 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9022 ctx_pg->ctx_pg_tbl) {
9023 int i, nr_tbls = rmem->nr_pages;
9024
9025 for (i = 0; i < nr_tbls; i++) {
9026 struct bnxt_ctx_pg_info *pg_tbl;
9027 struct bnxt_ring_mem_info *rmem2;
9028
9029 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9030 if (!pg_tbl)
9031 continue;
9032 rmem2 = &pg_tbl->ring_mem;
9033 bnxt_free_ring(bp, rmem2);
9034 ctx_pg->ctx_pg_arr[i] = NULL;
9035 kfree(pg_tbl);
9036 ctx_pg->ctx_pg_tbl[i] = NULL;
9037 }
9038 kfree(ctx_pg->ctx_pg_tbl);
9039 ctx_pg->ctx_pg_tbl = NULL;
9040 }
9041 bnxt_free_ring(bp, rmem);
9042 ctx_pg->nr_pages = 0;
9043 }
9044
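/* Size one context memory type (rounding to entry_multiple and clamping
 * to the min/max entry counts reported by firmware) and allocate page
 * tables for each instance in its instance bitmap.
 */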
9045 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9046 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9047 u8 pg_lvl)
9048 {
9049 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9050 int i, rc = 0, n = 1;
9051 u32 mem_size;
9052
9053 if (!ctxm->entry_size || !ctx_pg)
9054 return -EINVAL;
9055 if (ctxm->instance_bmap)
9056 n = hweight32(ctxm->instance_bmap);
9057 if (ctxm->entry_multiple)
9058 entries = roundup(entries, ctxm->entry_multiple);
9059 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9060 mem_size = entries * ctxm->entry_size;
9061 for (i = 0; i < n && !rc; i++) {
9062 ctx_pg[i].entries = entries;
9063 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9064 ctxm->init_value ? ctxm : NULL);
9065 }
9066 if (!rc)
9067 ctxm->mem_valid = 1;
9068 return rc;
9069 }
9070
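/* Configure one context memory type with the V2 interface, sending one
 * HWRM_FUNC_BACKING_STORE_CFG_V2 request per instance.  The BS_CFG_ALL_DONE
 * flag is set on the final instance of the last type.
 */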
9071 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9072 struct bnxt_ctx_mem_type *ctxm,
9073 bool last)
9074 {
9075 struct hwrm_func_backing_store_cfg_v2_input *req;
9076 u32 instance_bmap = ctxm->instance_bmap;
9077 int i, j, rc = 0, n = 1;
9078 __le32 *p;
9079
9080 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9081 return 0;
9082
9083 if (instance_bmap)
9084 n = hweight32(ctxm->instance_bmap);
9085 else
9086 instance_bmap = 1;
9087
9088 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9089 if (rc)
9090 return rc;
9091 hwrm_req_hold(bp, req);
9092 req->type = cpu_to_le16(ctxm->type);
9093 req->entry_size = cpu_to_le16(ctxm->entry_size);
9094 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9095 bnxt_bs_trace_avail(bp, ctxm->type)) {
9096 struct bnxt_bs_trace_info *bs_trace;
9097 u32 enables;
9098
9099 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9100 req->enables = cpu_to_le32(enables);
9101 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9102 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9103 }
9104 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9105 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9106 p[i] = cpu_to_le32(ctxm->split[i]);
9107 for (i = 0, j = 0; j < n && !rc; i++) {
9108 struct bnxt_ctx_pg_info *ctx_pg;
9109
9110 if (!(instance_bmap & (1 << i)))
9111 continue;
9112 req->instance = cpu_to_le16(i);
9113 ctx_pg = &ctxm->pg_info[j++];
9114 if (!ctx_pg->entries)
9115 continue;
9116 req->num_entries = cpu_to_le32(ctx_pg->entries);
9117 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9118 &req->page_size_pbl_level,
9119 &req->page_dir);
9120 if (last && j == n)
9121 req->flags =
9122 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9123 rc = hwrm_req_send(bp, req);
9124 }
9125 hwrm_req_drop(bp, req);
9126 return rc;
9127 }
9128
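/* Set up any firmware trace context types that are not yet allocated,
 * mark the last type to be configured, then configure every valid context
 * memory type through the V2 backing store interface.
 */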
9129 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
9130 {
9131 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9132 struct bnxt_ctx_mem_type *ctxm;
9133 u16 last_type = BNXT_CTX_INV;
9134 int rc = 0;
9135 u16 type;
9136
9137 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
9138 ctxm = &ctx->ctx_arr[type];
9139 if (!bnxt_bs_trace_avail(bp, type))
9140 continue;
9141 if (!ctxm->mem_valid) {
9142 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9143 ctxm->max_entries, 1);
9144 if (rc) {
9145 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9146 type);
9147 continue;
9148 }
9149 bnxt_bs_trace_init(bp, ctxm);
9150 }
9151 last_type = type;
9152 }
9153
9154 if (last_type == BNXT_CTX_INV) {
9155 if (!ena)
9156 return 0;
9157 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
9158 last_type = BNXT_CTX_MAX - 1;
9159 else
9160 last_type = BNXT_CTX_L2_MAX - 1;
9161 }
9162 ctx->ctx_arr[last_type].last = 1;
9163
9164 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9165 ctxm = &ctx->ctx_arr[type];
9166
9167 if (!ctxm->mem_valid)
9168 continue;
9169 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9170 if (rc)
9171 return rc;
9172 }
9173 return 0;
9174 }
9175
9176 /**
9177 * __bnxt_copy_ctx_mem - copy host context memory
9178 * @bp: The driver context
9179 * @ctxm: The pointer to the context memory type
9180 * @buf: The destination buffer or NULL to just obtain the length
9181 * @offset: The buffer offset to copy the data to
9182 * @head: The head offset of context memory to copy from
9183 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9184 *
9185 * This function is called for debugging purposes to dump the host context
9186 * used by the chip.
9187 *
9188 * Return: Length of memory copied
9189 */
9190 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9191 struct bnxt_ctx_mem_type *ctxm, void *buf,
9192 size_t offset, size_t head, size_t tail)
9193 {
9194 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9195 size_t len = 0, total_len = 0;
9196 int i, n = 1;
9197
9198 if (!ctx_pg)
9199 return 0;
9200
9201 if (ctxm->instance_bmap)
9202 n = hweight32(ctxm->instance_bmap);
9203 for (i = 0; i < n; i++) {
9204 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9205 tail);
9206 offset += len;
9207 total_len += len;
9208 }
9209 return total_len;
9210 }
9211
9212 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9213 void *buf, size_t offset)
9214 {
9215 size_t tail = ctxm->max_entries * ctxm->entry_size;
9216
9217 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9218 }
9219
9220 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9221 struct bnxt_ctx_mem_type *ctxm, bool force)
9222 {
9223 struct bnxt_ctx_pg_info *ctx_pg;
9224 int i, n = 1;
9225
9226 ctxm->last = 0;
9227
9228 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9229 return;
9230
9231 ctx_pg = ctxm->pg_info;
9232 if (ctx_pg) {
9233 if (ctxm->instance_bmap)
9234 n = hweight32(ctxm->instance_bmap);
9235 for (i = 0; i < n; i++)
9236 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9237
9238 kfree(ctx_pg);
9239 ctxm->pg_info = NULL;
9240 ctxm->mem_valid = 0;
9241 }
9242 memset(ctxm, 0, sizeof(*ctxm));
9243 }
9244
9245 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9246 {
9247 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9248 u16 type;
9249
9250 if (!ctx)
9251 return;
9252
9253 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9254 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9255
9256 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9257 if (force) {
9258 kfree(ctx);
9259 bp->ctx = NULL;
9260 }
9261 }
9262
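/* Query the backing store requirements from the firmware and allocate and
 * configure the QP, SRQ, CQ, VNIC, STAT, TQM and (when RoCE is supported)
 * MRAV/TIM context memory.
 */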
9263 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9264 {
9265 struct bnxt_ctx_mem_type *ctxm;
9266 struct bnxt_ctx_mem_info *ctx;
9267 u32 l2_qps, qp1_qps, max_qps;
9268 u32 ena, entries_sp, entries;
9269 u32 srqs, max_srqs, min;
9270 u32 num_mr, num_ah;
9271 u32 extra_srqs = 0;
9272 u32 extra_qps = 0;
9273 u32 fast_qpmd_qps;
9274 u8 pg_lvl = 1;
9275 int i, rc;
9276
9277 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9278 if (rc) {
9279 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9280 rc);
9281 return rc;
9282 }
9283 ctx = bp->ctx;
9284 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9285 return 0;
9286
9287 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9288 l2_qps = ctxm->qp_l2_entries;
9289 qp1_qps = ctxm->qp_qp1_entries;
9290 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9291 max_qps = ctxm->max_entries;
9292 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9293 srqs = ctxm->srq_l2_entries;
9294 max_srqs = ctxm->max_entries;
9295 ena = 0;
9296 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9297 pg_lvl = 2;
9298 if (BNXT_SW_RES_LMT(bp)) {
9299 extra_qps = max_qps - l2_qps - qp1_qps;
9300 extra_srqs = max_srqs - srqs;
9301 } else {
9302 extra_qps = min_t(u32, 65536,
9303 max_qps - l2_qps - qp1_qps);
9304 /* allocate extra qps if fw supports RoCE fast qp
9305 * destroy feature
9306 */
9307 extra_qps += fast_qpmd_qps;
9308 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9309 }
9310 if (fast_qpmd_qps)
9311 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9312 }
9313
9314 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9315 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9316 pg_lvl);
9317 if (rc)
9318 return rc;
9319
9320 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9321 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9322 if (rc)
9323 return rc;
9324
9325 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9326 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9327 extra_qps * 2, pg_lvl);
9328 if (rc)
9329 return rc;
9330
9331 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9332 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9333 if (rc)
9334 return rc;
9335
9336 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9337 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9338 if (rc)
9339 return rc;
9340
9341 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9342 goto skip_rdma;
9343
9344 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9345 if (BNXT_SW_RES_LMT(bp) &&
9346 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9347 num_ah = ctxm->mrav_av_entries;
9348 num_mr = ctxm->max_entries - num_ah;
9349 } else {
9350 /* 128K extra is needed to accommodate static AH context
9351 * allocation by f/w.
9352 */
9353 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9354 num_ah = min_t(u32, num_mr, 1024 * 128);
9355 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9356 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9357 ctxm->mrav_av_entries = num_ah;
9358 }
9359
9360 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9361 if (rc)
9362 return rc;
9363 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9364
9365 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9366 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9367 if (rc)
9368 return rc;
9369 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9370
9371 skip_rdma:
9372 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9373 min = ctxm->min_entries;
9374 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9375 2 * (extra_qps + qp1_qps) + min;
9376 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9377 if (rc)
9378 return rc;
9379
9380 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9381 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9382 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9383 if (rc)
9384 return rc;
9385 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9386 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9387 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9388
9389 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9390 rc = bnxt_backing_store_cfg_v2(bp, ena);
9391 else
9392 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9393 if (rc) {
9394 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9395 rc);
9396 return rc;
9397 }
9398 ctx->flags |= BNXT_CTX_FLAG_INITED;
9399 return 0;
9400 }
9401
9402 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9403 {
9404 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9405 u16 page_attr;
9406 int rc;
9407
9408 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9409 return 0;
9410
9411 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9412 if (rc)
9413 return rc;
9414
9415 if (BNXT_PAGE_SIZE == 0x2000)
9416 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9417 else if (BNXT_PAGE_SIZE == 0x10000)
9418 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9419 else
9420 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9421 req->pg_size_lvl = cpu_to_le16(page_attr |
9422 bp->fw_crash_mem->ring_mem.depth);
9423 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9424 req->size = cpu_to_le32(bp->fw_crash_len);
9425 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9426 return hwrm_req_send(bp, req);
9427 }
9428
9429 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9430 {
9431 if (bp->fw_crash_mem) {
9432 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9433 kfree(bp->fw_crash_mem);
9434 bp->fw_crash_mem = NULL;
9435 }
9436 }
9437
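/* Allocate (or reuse) host memory for the firmware crash dump when the
 * firmware supports dumping to host DDR.
 */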
9438 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9439 {
9440 u32 mem_size = 0;
9441 int rc;
9442
9443 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9444 return 0;
9445
9446 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9447 if (rc)
9448 return rc;
9449
9450 mem_size = round_up(mem_size, 4);
9451
9452 /* keep and use the existing pages */
9453 if (bp->fw_crash_mem &&
9454 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9455 goto alloc_done;
9456
9457 if (bp->fw_crash_mem)
9458 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9459 else
9460 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9461 GFP_KERNEL);
9462 if (!bp->fw_crash_mem)
9463 return -ENOMEM;
9464
9465 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9466 if (rc) {
9467 bnxt_free_crash_dump_mem(bp);
9468 return rc;
9469 }
9470
9471 alloc_done:
9472 bp->fw_crash_len = mem_size;
9473 return 0;
9474 }
9475
9476 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9477 {
9478 struct hwrm_func_resource_qcaps_output *resp;
9479 struct hwrm_func_resource_qcaps_input *req;
9480 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9481 int rc;
9482
9483 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9484 if (rc)
9485 return rc;
9486
9487 req->fid = cpu_to_le16(0xffff);
9488 resp = hwrm_req_hold(bp, req);
9489 rc = hwrm_req_send_silent(bp, req);
9490 if (rc)
9491 goto hwrm_func_resc_qcaps_exit;
9492
9493 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9494 if (!all)
9495 goto hwrm_func_resc_qcaps_exit;
9496
9497 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9498 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9499 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9500 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9501 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9502 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9503 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9504 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9505 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9506 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9507 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9508 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9509 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9510 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9511 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9512 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9513
9514 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9515 u16 max_msix = le16_to_cpu(resp->max_msix);
9516
9517 hw_resc->max_nqs = max_msix;
9518 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9519 }
9520
9521 if (BNXT_PF(bp)) {
9522 struct bnxt_pf_info *pf = &bp->pf;
9523
9524 pf->vf_resv_strategy =
9525 le16_to_cpu(resp->vf_reservation_strategy);
9526 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9527 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9528 }
9529 hwrm_func_resc_qcaps_exit:
9530 hwrm_req_drop(bp, req);
9531 return rc;
9532 }
9533
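/* Query the PTP configuration from the firmware and record the PHC
 * register addresses.  bp->ptp_cfg is allocated on first use and freed
 * again if PTP turns out to be unusable.
 */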
9534 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9535 {
9536 struct hwrm_port_mac_ptp_qcfg_output *resp;
9537 struct hwrm_port_mac_ptp_qcfg_input *req;
9538 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9539 u8 flags;
9540 int rc;
9541
9542 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9543 rc = -ENODEV;
9544 goto no_ptp;
9545 }
9546
9547 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9548 if (rc)
9549 goto no_ptp;
9550
9551 req->port_id = cpu_to_le16(bp->pf.port_id);
9552 resp = hwrm_req_hold(bp, req);
9553 rc = hwrm_req_send(bp, req);
9554 if (rc)
9555 goto exit;
9556
9557 flags = resp->flags;
9558 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9559 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9560 rc = -ENODEV;
9561 goto exit;
9562 }
9563 if (!ptp) {
9564 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9565 if (!ptp) {
9566 rc = -ENOMEM;
9567 goto exit;
9568 }
9569 ptp->bp = bp;
9570 bp->ptp_cfg = ptp;
9571 }
9572
9573 if (flags &
9574 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9575 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9576 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9577 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9578 } else if (BNXT_CHIP_P5(bp)) {
9579 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9580 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9581 } else {
9582 rc = -ENODEV;
9583 goto exit;
9584 }
9585 ptp->rtc_configured =
9586 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9587 rc = bnxt_ptp_init(bp);
9588 if (rc)
9589 netdev_warn(bp->dev, "PTP initialization failed.\n");
9590 exit:
9591 hwrm_req_drop(bp, req);
9592 if (!rc)
9593 return 0;
9594
9595 no_ptp:
9596 bnxt_ptp_clear(bp);
9597 kfree(ptp);
9598 bp->ptp_cfg = NULL;
9599 return rc;
9600 }
9601
9602 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9603 {
9604 struct hwrm_func_qcaps_output *resp;
9605 struct hwrm_func_qcaps_input *req;
9606 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9607 u32 flags, flags_ext, flags_ext2;
9608 int rc;
9609
9610 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9611 if (rc)
9612 return rc;
9613
9614 req->fid = cpu_to_le16(0xffff);
9615 resp = hwrm_req_hold(bp, req);
9616 rc = hwrm_req_send(bp, req);
9617 if (rc)
9618 goto hwrm_func_qcaps_exit;
9619
9620 flags = le32_to_cpu(resp->flags);
9621 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9622 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9623 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9624 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9625 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9626 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9627 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9628 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9629 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9630 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9631 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9632 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9633 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9634 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9635 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9636 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9637 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9638 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9639
9640 flags_ext = le32_to_cpu(resp->flags_ext);
9641 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9642 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9643 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9644 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9645 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9646 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9647 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9648 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9649 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9650 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9651 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9652 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9653 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9654 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9655 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9656 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9657 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9658 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9659
9660 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9661 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9662 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9663 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9664 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9665 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9666 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9667 if (flags_ext2 &
9668 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9669 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9670 if (BNXT_PF(bp) &&
9671 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9672 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9673
9674 bp->tx_push_thresh = 0;
9675 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9676 BNXT_FW_MAJ(bp) > 217)
9677 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9678
9679 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9680 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9681 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9682 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9683 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9684 if (!hw_resc->max_hw_ring_grps)
9685 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9686 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9687 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9688 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9689
9690 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9691 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9692 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9693 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9694 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9695 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9696
9697 if (BNXT_PF(bp)) {
9698 struct bnxt_pf_info *pf = &bp->pf;
9699
9700 pf->fw_fid = le16_to_cpu(resp->fid);
9701 pf->port_id = le16_to_cpu(resp->port_id);
9702 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9703 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9704 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9705 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9706 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9707 bp->flags |= BNXT_FLAG_WOL_CAP;
9708 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9709 bp->fw_cap |= BNXT_FW_CAP_PTP;
9710 } else {
9711 bnxt_ptp_clear(bp);
9712 kfree(bp->ptp_cfg);
9713 bp->ptp_cfg = NULL;
9714 }
9715 } else {
9716 #ifdef CONFIG_BNXT_SRIOV
9717 struct bnxt_vf_info *vf = &bp->vf;
9718
9719 vf->fw_fid = le16_to_cpu(resp->fid);
9720 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9721 #endif
9722 }
9723 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9724
9725 hwrm_func_qcaps_exit:
9726 hwrm_req_drop(bp, req);
9727 return rc;
9728 }
9729
9730 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9731 {
9732 struct hwrm_dbg_qcaps_output *resp;
9733 struct hwrm_dbg_qcaps_input *req;
9734 int rc;
9735
9736 bp->fw_dbg_cap = 0;
9737 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9738 return;
9739
9740 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9741 if (rc)
9742 return;
9743
9744 req->fid = cpu_to_le16(0xffff);
9745 resp = hwrm_req_hold(bp, req);
9746 rc = hwrm_req_send(bp, req);
9747 if (rc)
9748 goto hwrm_dbg_qcaps_exit;
9749
9750 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9751
9752 hwrm_dbg_qcaps_exit:
9753 hwrm_req_drop(bp, req);
9754 }
9755
9756 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9757
9758 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9759 {
9760 int rc;
9761
9762 rc = __bnxt_hwrm_func_qcaps(bp);
9763 if (rc)
9764 return rc;
9765
9766 bnxt_hwrm_dbg_qcaps(bp);
9767
9768 rc = bnxt_hwrm_queue_qportcfg(bp);
9769 if (rc) {
9770 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9771 return rc;
9772 }
9773 if (bp->hwrm_spec_code >= 0x10803) {
9774 rc = bnxt_alloc_ctx_mem(bp);
9775 if (rc)
9776 return rc;
9777 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9778 if (!rc)
9779 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9780 }
9781 return 0;
9782 }
9783
9784 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9785 {
9786 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9787 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9788 u32 flags;
9789 int rc;
9790
9791 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9792 return 0;
9793
9794 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9795 if (rc)
9796 return rc;
9797
9798 resp = hwrm_req_hold(bp, req);
9799 rc = hwrm_req_send(bp, req);
9800 if (rc)
9801 goto hwrm_cfa_adv_qcaps_exit;
9802
9803 flags = le32_to_cpu(resp->flags);
9804 if (flags &
9805 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9806 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9807
9808 if (flags &
9809 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9810 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9811
9812 if (flags &
9813 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9814 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9815
9816 hwrm_cfa_adv_qcaps_exit:
9817 hwrm_req_drop(bp, req);
9818 return rc;
9819 }
9820
9821 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9822 {
9823 if (bp->fw_health)
9824 return 0;
9825
9826 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9827 if (!bp->fw_health)
9828 return -ENOMEM;
9829
9830 mutex_init(&bp->fw_health->lock);
9831 return 0;
9832 }
9833
9834 static int bnxt_alloc_fw_health(struct bnxt *bp)
9835 {
9836 int rc;
9837
9838 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9839 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9840 return 0;
9841
9842 rc = __bnxt_alloc_fw_health(bp);
9843 if (rc) {
9844 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9845 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9846 return rc;
9847 }
9848
9849 return 0;
9850 }
9851
9852 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9853 {
9854 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9855 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9856 BNXT_FW_HEALTH_WIN_MAP_OFF);
9857 }
9858
9859 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9860 {
9861 struct bnxt_fw_health *fw_health = bp->fw_health;
9862 u32 reg_type;
9863
9864 if (!fw_health)
9865 return;
9866
9867 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9868 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9869 fw_health->status_reliable = false;
9870
9871 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9872 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9873 fw_health->resets_reliable = false;
9874 }
9875
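/* Locate the firmware status register, either from the hcomm status
 * structure or from the chip-specific GRC fallback on P5+ chips, and map
 * it so that firmware health can be polled.
 */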
9876 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9877 {
9878 void __iomem *hs;
9879 u32 status_loc;
9880 u32 reg_type;
9881 u32 sig;
9882
9883 if (bp->fw_health)
9884 bp->fw_health->status_reliable = false;
9885
9886 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9887 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9888
9889 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9890 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9891 if (!bp->chip_num) {
9892 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9893 bp->chip_num = readl(bp->bar0 +
9894 BNXT_FW_HEALTH_WIN_BASE +
9895 BNXT_GRC_REG_CHIP_NUM);
9896 }
9897 if (!BNXT_CHIP_P5_PLUS(bp))
9898 return;
9899
9900 status_loc = BNXT_GRC_REG_STATUS_P5 |
9901 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9902 } else {
9903 status_loc = readl(hs + offsetof(struct hcomm_status,
9904 fw_status_loc));
9905 }
9906
9907 if (__bnxt_alloc_fw_health(bp)) {
9908 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9909 return;
9910 }
9911
9912 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9913 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9914 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9915 __bnxt_map_fw_health_reg(bp, status_loc);
9916 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9917 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9918 }
9919
9920 bp->fw_health->status_reliable = true;
9921 }
9922
9923 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9924 {
9925 struct bnxt_fw_health *fw_health = bp->fw_health;
9926 u32 reg_base = 0xffffffff;
9927 int i;
9928
9929 bp->fw_health->status_reliable = false;
9930 bp->fw_health->resets_reliable = false;
9931 /* Only pre-map the monitoring GRC registers using window 3 */
9932 for (i = 0; i < 4; i++) {
9933 u32 reg = fw_health->regs[i];
9934
9935 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9936 continue;
9937 if (reg_base == 0xffffffff)
9938 reg_base = reg & BNXT_GRC_BASE_MASK;
9939 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9940 return -ERANGE;
9941 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9942 }
9943 bp->fw_health->status_reliable = true;
9944 bp->fw_health->resets_reliable = true;
9945 if (reg_base == 0xffffffff)
9946 return 0;
9947
9948 __bnxt_map_fw_health_reg(bp, reg_base);
9949 return 0;
9950 }
9951
9952 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9953 {
9954 if (!bp->fw_health)
9955 return;
9956
9957 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9958 bp->fw_health->status_reliable = true;
9959 bp->fw_health->resets_reliable = true;
9960 } else {
9961 bnxt_try_map_fw_health_reg(bp);
9962 }
9963 }
9964
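/* Read the error recovery configuration from the firmware: polling
 * intervals, the health/heartbeat/reset-counter register locations and
 * the register write sequence used to reset the chip, then map the GRC
 * health registers.
 */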
9965 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9966 {
9967 struct bnxt_fw_health *fw_health = bp->fw_health;
9968 struct hwrm_error_recovery_qcfg_output *resp;
9969 struct hwrm_error_recovery_qcfg_input *req;
9970 int rc, i;
9971
9972 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9973 return 0;
9974
9975 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9976 if (rc)
9977 return rc;
9978
9979 resp = hwrm_req_hold(bp, req);
9980 rc = hwrm_req_send(bp, req);
9981 if (rc)
9982 goto err_recovery_out;
9983 fw_health->flags = le32_to_cpu(resp->flags);
9984 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9985 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9986 rc = -EINVAL;
9987 goto err_recovery_out;
9988 }
9989 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9990 fw_health->master_func_wait_dsecs =
9991 le32_to_cpu(resp->master_func_wait_period);
9992 fw_health->normal_func_wait_dsecs =
9993 le32_to_cpu(resp->normal_func_wait_period);
9994 fw_health->post_reset_wait_dsecs =
9995 le32_to_cpu(resp->master_func_wait_period_after_reset);
9996 fw_health->post_reset_max_wait_dsecs =
9997 le32_to_cpu(resp->max_bailout_time_after_reset);
9998 fw_health->regs[BNXT_FW_HEALTH_REG] =
9999 le32_to_cpu(resp->fw_health_status_reg);
10000 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10001 le32_to_cpu(resp->fw_heartbeat_reg);
10002 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10003 le32_to_cpu(resp->fw_reset_cnt_reg);
10004 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10005 le32_to_cpu(resp->reset_inprogress_reg);
10006 fw_health->fw_reset_inprog_reg_mask =
10007 le32_to_cpu(resp->reset_inprogress_reg_mask);
10008 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10009 if (fw_health->fw_reset_seq_cnt >= 16) {
10010 rc = -EINVAL;
10011 goto err_recovery_out;
10012 }
10013 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10014 fw_health->fw_reset_seq_regs[i] =
10015 le32_to_cpu(resp->reset_reg[i]);
10016 fw_health->fw_reset_seq_vals[i] =
10017 le32_to_cpu(resp->reset_reg_val[i]);
10018 fw_health->fw_reset_seq_delay_msec[i] =
10019 resp->delay_after_reset[i];
10020 }
10021 err_recovery_out:
10022 hwrm_req_drop(bp, req);
10023 if (!rc)
10024 rc = bnxt_map_fw_health_regs(bp);
10025 if (rc)
10026 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10027 return rc;
10028 }
10029
10030 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10031 {
10032 struct hwrm_func_reset_input *req;
10033 int rc;
10034
10035 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10036 if (rc)
10037 return rc;
10038
10039 req->enables = 0;
10040 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10041 return hwrm_req_send(bp, req);
10042 }
10043
10044 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10045 {
10046 struct hwrm_nvm_get_dev_info_output nvm_info;
10047
10048 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10049 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10050 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10051 nvm_info.nvm_cfg_ver_upd);
10052 }
10053
10054 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10055 {
10056 struct hwrm_queue_qportcfg_output *resp;
10057 struct hwrm_queue_qportcfg_input *req;
10058 u8 i, j, *qptr;
10059 bool no_rdma;
10060 int rc = 0;
10061
10062 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10063 if (rc)
10064 return rc;
10065
10066 resp = hwrm_req_hold(bp, req);
10067 rc = hwrm_req_send(bp, req);
10068 if (rc)
10069 goto qportcfg_exit;
10070
10071 if (!resp->max_configurable_queues) {
10072 rc = -EINVAL;
10073 goto qportcfg_exit;
10074 }
10075 bp->max_tc = resp->max_configurable_queues;
10076 bp->max_lltc = resp->max_configurable_lossless_queues;
10077 if (bp->max_tc > BNXT_MAX_QUEUE)
10078 bp->max_tc = BNXT_MAX_QUEUE;
10079
10080 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10081 qptr = &resp->queue_id0;
10082 for (i = 0, j = 0; i < bp->max_tc; i++) {
10083 bp->q_info[j].queue_id = *qptr;
10084 bp->q_ids[i] = *qptr++;
10085 bp->q_info[j].queue_profile = *qptr++;
10086 bp->tc_to_qidx[j] = j;
10087 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10088 (no_rdma && BNXT_PF(bp)))
10089 j++;
10090 }
10091 bp->max_q = bp->max_tc;
10092 bp->max_tc = max_t(u8, j, 1);
10093
10094 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10095 bp->max_tc = 1;
10096
10097 if (bp->max_lltc > bp->max_tc)
10098 bp->max_lltc = bp->max_tc;
10099
10100 qportcfg_exit:
10101 hwrm_req_drop(bp, req);
10102 return rc;
10103 }
10104
10105 static int bnxt_hwrm_poll(struct bnxt *bp)
10106 {
10107 struct hwrm_ver_get_input *req;
10108 int rc;
10109
10110 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10111 if (rc)
10112 return rc;
10113
10114 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10115 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10116 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10117
10118 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10119 rc = hwrm_req_send(bp, req);
10120 return rc;
10121 }
10122
10123 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10124 {
10125 struct hwrm_ver_get_output *resp;
10126 struct hwrm_ver_get_input *req;
10127 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10128 u32 dev_caps_cfg, hwrm_ver;
10129 int rc, len, max_tmo_secs;
10130
10131 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10132 if (rc)
10133 return rc;
10134
10135 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10136 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10137 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10138 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10139 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10140
10141 resp = hwrm_req_hold(bp, req);
10142 rc = hwrm_req_send(bp, req);
10143 if (rc)
10144 goto hwrm_ver_get_exit;
10145
10146 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10147
10148 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10149 resp->hwrm_intf_min_8b << 8 |
10150 resp->hwrm_intf_upd_8b;
10151 if (resp->hwrm_intf_maj_8b < 1) {
10152 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10153 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10154 resp->hwrm_intf_upd_8b);
10155 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10156 }
10157
10158 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10159 HWRM_VERSION_UPDATE;
10160
10161 if (bp->hwrm_spec_code > hwrm_ver)
10162 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10163 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10164 HWRM_VERSION_UPDATE);
10165 else
10166 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10167 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10168 resp->hwrm_intf_upd_8b);
10169
10170 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10171 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10172 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10173 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10174 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10175 len = FW_VER_STR_LEN;
10176 } else {
10177 fw_maj = resp->hwrm_fw_maj_8b;
10178 fw_min = resp->hwrm_fw_min_8b;
10179 fw_bld = resp->hwrm_fw_bld_8b;
10180 fw_rsv = resp->hwrm_fw_rsvd_8b;
10181 len = BC_HWRM_STR_LEN;
10182 }
10183 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10184 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10185 fw_rsv);
10186
10187 if (strlen(resp->active_pkg_name)) {
10188 int fw_ver_len = strlen(bp->fw_ver_str);
10189
10190 snprintf(bp->fw_ver_str + fw_ver_len,
10191 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10192 resp->active_pkg_name);
10193 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10194 }
10195
10196 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10197 if (!bp->hwrm_cmd_timeout)
10198 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10199 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10200 if (!bp->hwrm_cmd_max_timeout)
10201 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10202 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10203 #ifdef CONFIG_DETECT_HUNG_TASK
10204 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10205 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10206 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10207 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10208 }
10209 #endif
10210
10211 if (resp->hwrm_intf_maj_8b >= 1) {
10212 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10213 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10214 }
10215 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10216 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10217
10218 bp->chip_num = le16_to_cpu(resp->chip_num);
10219 bp->chip_rev = resp->chip_rev;
10220 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10221 !resp->chip_metal)
10222 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10223
10224 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10225 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10226 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10227 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10228
10229 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10230 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10231
10232 if (dev_caps_cfg &
10233 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10234 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10235
10236 if (dev_caps_cfg &
10237 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10238 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10239
10240 if (dev_caps_cfg &
10241 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10242 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10243
10244 hwrm_ver_get_exit:
10245 hwrm_req_drop(bp, req);
10246 return rc;
10247 }
10248
10249 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10250 {
10251 struct hwrm_fw_set_time_input *req;
10252 struct tm tm;
10253 time64_t now = ktime_get_real_seconds();
10254 int rc;
10255
10256 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10257 bp->hwrm_spec_code < 0x10400)
10258 return -EOPNOTSUPP;
10259
10260 time64_to_tm(now, 0, &tm);
10261 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10262 if (rc)
10263 return rc;
10264
10265 req->year = cpu_to_le16(1900 + tm.tm_year);
10266 req->month = 1 + tm.tm_mon;
10267 req->day = tm.tm_mday;
10268 req->hour = tm.tm_hour;
10269 req->minute = tm.tm_min;
10270 req->second = tm.tm_sec;
10271 return hwrm_req_send(bp, req);
10272 }
10273
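/* Accumulate a hardware counter that is only 'mask' bits wide into the
 * full 64-bit software counter, accounting for wrap-around of the
 * narrower hardware counter.
 */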
10274 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10275 {
10276 u64 sw_tmp;
10277
10278 hw &= mask;
10279 sw_tmp = (*sw & ~mask) | hw;
10280 if (hw < (*sw & mask))
10281 sw_tmp += mask + 1;
10282 WRITE_ONCE(*sw, sw_tmp);
10283 }
10284
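/* Accumulate an array of hardware counters into the software counters.
 * Counters with an all-ones mask are copied directly; narrower counters
 * go through bnxt_add_one_ctr().  Zero values can be ignored to work
 * around counters that intermittently read back as 0.
 */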
10285 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10286 int count, bool ignore_zero)
10287 {
10288 int i;
10289
10290 for (i = 0; i < count; i++) {
10291 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10292
10293 if (ignore_zero && !hw)
10294 continue;
10295
10296 if (masks[i] == -1ULL)
10297 sw_stats[i] = hw;
10298 else
10299 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10300 }
10301 }
10302
10303 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10304 {
10305 if (!stats->hw_stats)
10306 return;
10307
10308 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10309 stats->hw_masks, stats->len / 8, false);
10310 }
10311
10312 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10313 {
10314 struct bnxt_stats_mem *ring0_stats;
10315 bool ignore_zero = false;
10316 int i;
10317
10318 /* Chip bug. Counter intermittently becomes 0. */
10319 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10320 ignore_zero = true;
10321
10322 for (i = 0; i < bp->cp_nr_rings; i++) {
10323 struct bnxt_napi *bnapi = bp->bnapi[i];
10324 struct bnxt_cp_ring_info *cpr;
10325 struct bnxt_stats_mem *stats;
10326
10327 cpr = &bnapi->cp_ring;
10328 stats = &cpr->stats;
10329 if (!i)
10330 ring0_stats = stats;
10331 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10332 ring0_stats->hw_masks,
10333 ring0_stats->len / 8, ignore_zero);
10334 }
10335 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10336 struct bnxt_stats_mem *stats = &bp->port_stats;
10337 __le64 *hw_stats = stats->hw_stats;
10338 u64 *sw_stats = stats->sw_stats;
10339 u64 *masks = stats->hw_masks;
10340 int cnt;
10341
10342 cnt = sizeof(struct rx_port_stats) / 8;
10343 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10344
10345 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10346 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10347 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10348 cnt = sizeof(struct tx_port_stats) / 8;
10349 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10350 }
10351 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10352 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10353 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10354 }
10355 }
10356
10357 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10358 {
10359 struct hwrm_port_qstats_input *req;
10360 struct bnxt_pf_info *pf = &bp->pf;
10361 int rc;
10362
10363 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10364 return 0;
10365
10366 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10367 return -EOPNOTSUPP;
10368
10369 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10370 if (rc)
10371 return rc;
10372
10373 req->flags = flags;
10374 req->port_id = cpu_to_le16(pf->port_id);
10375 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10376 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10377 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10378 return hwrm_req_send(bp, req);
10379 }
10380
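/* DMA the extended port statistics from the firmware and, if the TX
 * extended statistics include the per-priority counters, also query the
 * priority to CoS queue mapping used to report them.
 */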
10381 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10382 {
10383 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10384 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10385 struct hwrm_port_qstats_ext_output *resp_qs;
10386 struct hwrm_port_qstats_ext_input *req_qs;
10387 struct bnxt_pf_info *pf = &bp->pf;
10388 u32 tx_stat_size;
10389 int rc;
10390
10391 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10392 return 0;
10393
10394 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10395 return -EOPNOTSUPP;
10396
10397 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10398 if (rc)
10399 return rc;
10400
10401 req_qs->flags = flags;
10402 req_qs->port_id = cpu_to_le16(pf->port_id);
10403 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10404 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10405 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10406 sizeof(struct tx_port_stats_ext) : 0;
10407 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10408 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10409 resp_qs = hwrm_req_hold(bp, req_qs);
10410 rc = hwrm_req_send(bp, req_qs);
10411 if (!rc) {
10412 bp->fw_rx_stats_ext_size =
10413 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10414 if (BNXT_FW_MAJ(bp) < 220 &&
10415 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10416 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10417
10418 bp->fw_tx_stats_ext_size = tx_stat_size ?
10419 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10420 } else {
10421 bp->fw_rx_stats_ext_size = 0;
10422 bp->fw_tx_stats_ext_size = 0;
10423 }
10424 hwrm_req_drop(bp, req_qs);
10425
10426 if (flags)
10427 return rc;
10428
10429 if (bp->fw_tx_stats_ext_size <=
10430 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10431 bp->pri2cos_valid = 0;
10432 return rc;
10433 }
10434
10435 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10436 if (rc)
10437 return rc;
10438
10439 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10440
10441 resp_qc = hwrm_req_hold(bp, req_qc);
10442 rc = hwrm_req_send(bp, req_qc);
10443 if (!rc) {
10444 u8 *pri2cos;
10445 int i, j;
10446
10447 pri2cos = &resp_qc->pri0_cos_queue_id;
10448 for (i = 0; i < 8; i++) {
10449 u8 queue_id = pri2cos[i];
10450 u8 queue_idx;
10451
10452 /* Per port queue IDs start from 0, 10, 20, etc */
10453 queue_idx = queue_id % 10;
10454 if (queue_idx > BNXT_MAX_QUEUE) {
10455 bp->pri2cos_valid = false;
10456 hwrm_req_drop(bp, req_qc);
10457 return rc;
10458 }
10459 for (j = 0; j < bp->max_q; j++) {
10460 if (bp->q_ids[j] == queue_id)
10461 bp->pri2cos_idx[i] = queue_idx;
10462 }
10463 }
10464 bp->pri2cos_valid = true;
10465 }
10466 hwrm_req_drop(bp, req_qc);
10467
10468 return rc;
10469 }
10470
10471 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10472 {
10473 bnxt_hwrm_tunnel_dst_port_free(bp,
10474 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10475 bnxt_hwrm_tunnel_dst_port_free(bp,
10476 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10477 }
10478
10479 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10480 {
10481 int rc, i;
10482 u32 tpa_flags = 0;
10483
10484 if (set_tpa)
10485 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10486 else if (BNXT_NO_FW_ACCESS(bp))
10487 return 0;
10488 for (i = 0; i < bp->nr_vnics; i++) {
10489 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10490 if (rc) {
10491 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10492 i, rc);
10493 return rc;
10494 }
10495 }
10496 return 0;
10497 }
10498
10499 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10500 {
10501 int i;
10502
10503 for (i = 0; i < bp->nr_vnics; i++)
10504 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10505 }
10506
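/* Tear down all VNIC state: L2 filters, RSS settings and contexts, TPA
 * and the VNICs themselves.  On P5_PLUS chips the RSS contexts are
 * freed after the VNICs; on older chips they must be freed first.
 */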
10507 static void bnxt_clear_vnic(struct bnxt *bp)
10508 {
10509 if (!bp->vnic_info)
10510 return;
10511
10512 bnxt_hwrm_clear_vnic_filter(bp);
10513 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10514 /* clear all RSS settings before freeing the vnic ctx */
10515 bnxt_hwrm_clear_vnic_rss(bp);
10516 bnxt_hwrm_vnic_ctx_free(bp);
10517 }
10518 /* before freeing the vnic, undo the vnic tpa settings */
10519 if (bp->flags & BNXT_FLAG_TPA)
10520 bnxt_set_tpa(bp, false);
10521 bnxt_hwrm_vnic_free(bp);
10522 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10523 bnxt_hwrm_vnic_ctx_free(bp);
10524 }
10525
10526 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10527 bool irq_re_init)
10528 {
10529 bnxt_clear_vnic(bp);
10530 bnxt_hwrm_ring_free(bp, close_path);
10531 bnxt_hwrm_ring_grp_free(bp);
10532 if (irq_re_init) {
10533 bnxt_hwrm_stat_ctx_free(bp);
10534 bnxt_hwrm_free_tunnel_ports(bp);
10535 }
10536 }
10537
10538 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10539 {
10540 struct hwrm_func_cfg_input *req;
10541 u8 evb_mode;
10542 int rc;
10543
10544 if (br_mode == BRIDGE_MODE_VEB)
10545 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10546 else if (br_mode == BRIDGE_MODE_VEPA)
10547 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10548 else
10549 return -EINVAL;
10550
10551 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10552 if (rc)
10553 return rc;
10554
10555 req->fid = cpu_to_le16(0xffff);
10556 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10557 req->evb_mode = evb_mode;
10558 return hwrm_req_send(bp, req);
10559 }
10560
10561 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10562 {
10563 struct hwrm_func_cfg_input *req;
10564 int rc;
10565
10566 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10567 return 0;
10568
10569 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10570 if (rc)
10571 return rc;
10572
10573 req->fid = cpu_to_le16(0xffff);
10574 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10575 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10576 if (size == 128)
10577 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10578
10579 return hwrm_req_send(bp, req);
10580 }
10581
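/* Set up one VNIC on pre-P5 chips: allocate the RSS (and Nitro A0 CoS)
 * contexts, configure the VNIC and its ring group, enable RSS and, if
 * aggregation rings are in use, header-data split.
 */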
10582 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10583 {
10584 int rc;
10585
10586 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10587 goto skip_rss_ctx;
10588
10589 /* allocate context for vnic */
10590 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10591 if (rc) {
10592 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10593 vnic->vnic_id, rc);
10594 goto vnic_setup_err;
10595 }
10596 bp->rsscos_nr_ctxs++;
10597
10598 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10599 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10600 if (rc) {
10601 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10602 vnic->vnic_id, rc);
10603 goto vnic_setup_err;
10604 }
10605 bp->rsscos_nr_ctxs++;
10606 }
10607
10608 skip_rss_ctx:
10609 /* configure default vnic, ring grp */
10610 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10611 if (rc) {
10612 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10613 vnic->vnic_id, rc);
10614 goto vnic_setup_err;
10615 }
10616
10617 /* Enable RSS hashing on vnic */
10618 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10619 if (rc) {
10620 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10621 vnic->vnic_id, rc);
10622 goto vnic_setup_err;
10623 }
10624
10625 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10626 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10627 if (rc) {
10628 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10629 vnic->vnic_id, rc);
10630 }
10631 }
10632
10633 vnic_setup_err:
10634 return rc;
10635 }
10636
10637 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10638 u8 valid)
10639 {
10640 struct hwrm_vnic_update_input *req;
10641 int rc;
10642
10643 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10644 if (rc)
10645 return rc;
10646
10647 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10648
10649 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10650 req->mru = cpu_to_le16(vnic->mru);
10651
10652 req->enables = cpu_to_le32(valid);
10653
10654 return hwrm_req_send(bp, req);
10655 }
10656
10657 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10658 {
10659 int rc;
10660
10661 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10662 if (rc) {
10663 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10664 vnic->vnic_id, rc);
10665 return rc;
10666 }
10667 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10668 if (rc)
10669 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10670 vnic->vnic_id, rc);
10671 return rc;
10672 }
10673
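/* P5_PLUS variant of VNIC setup: allocate the RSS contexts needed to
 * cover all RX rings, then configure RSS and, if aggregation rings are
 * in use, header-data split for the VNIC.
 */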
10674 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10675 {
10676 int rc, i, nr_ctxs;
10677
10678 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10679 for (i = 0; i < nr_ctxs; i++) {
10680 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10681 if (rc) {
10682 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10683 vnic->vnic_id, i, rc);
10684 break;
10685 }
10686 bp->rsscos_nr_ctxs++;
10687 }
10688 if (i < nr_ctxs)
10689 return -ENOMEM;
10690
10691 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10692 if (rc)
10693 return rc;
10694
10695 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10696 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10697 if (rc) {
10698 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10699 vnic->vnic_id, rc);
10700 }
10701 }
10702 return rc;
10703 }
10704
10705 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10706 {
10707 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10708 return __bnxt_setup_vnic_p5(bp, vnic);
10709 else
10710 return __bnxt_setup_vnic(bp, vnic);
10711 }
10712
10713 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10714 struct bnxt_vnic_info *vnic,
10715 u16 start_rx_ring_idx, int rx_rings)
10716 {
10717 int rc;
10718
10719 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10720 if (rc) {
10721 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10722 vnic->vnic_id, rc);
10723 return rc;
10724 }
10725 return bnxt_setup_vnic(bp, vnic);
10726 }
10727
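/* Allocate the VNICs used for RFS/ntuple filtering.  Chips with a
 * dedicated ntuple VNIC get a single VNIC covering all RX rings;
 * legacy chips get one RFS VNIC per RX ring.
 */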
10728 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10729 {
10730 struct bnxt_vnic_info *vnic;
10731 int i, rc = 0;
10732
10733 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10734 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10735 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10736 }
10737
10738 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10739 return 0;
10740
10741 for (i = 0; i < bp->rx_nr_rings; i++) {
10742 u16 vnic_id = i + 1;
10743 u16 ring_id = i;
10744
10745 if (vnic_id >= bp->nr_vnics)
10746 break;
10747
10748 vnic = &bp->vnic_info[vnic_id];
10749 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10750 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10751 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10752 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10753 break;
10754 }
10755 return rc;
10756 }
10757
10758 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10759 bool all)
10760 {
10761 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10762 struct bnxt_filter_base *usr_fltr, *tmp;
10763 struct bnxt_ntuple_filter *ntp_fltr;
10764 int i;
10765
10766 if (netif_running(bp->dev)) {
10767 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10768 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10769 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10770 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10771 }
10772 }
10773 if (!all)
10774 return;
10775
10776 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10777 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10778 usr_fltr->fw_vnic_id == rss_ctx->index) {
10779 ntp_fltr = container_of(usr_fltr,
10780 struct bnxt_ntuple_filter,
10781 base);
10782 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10783 bnxt_del_ntp_filter(bp, ntp_fltr);
10784 bnxt_del_one_usr_fltr(bp, usr_fltr);
10785 }
10786 }
10787
10788 if (vnic->rss_table)
10789 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10790 vnic->rss_table,
10791 vnic->rss_table_dma_addr);
10792 bp->num_rss_ctx--;
10793 }
10794
10795 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10796 int rxr_id)
10797 {
10798 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10799 int i, vnic_rx;
10800
10801 /* The ntuple VNIC always covers all the rx rings, so any ring id
10802 * change must be propagated because a future filter may use it.
10803 */
10804 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10805 return true;
10806
10807 for (i = 0; i < tbl_size; i++) {
10808 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10809 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10810 else
10811 vnic_rx = bp->rss_indir_tbl[i];
10812
10813 if (rxr_id == vnic_rx)
10814 return true;
10815 }
10816
10817 return false;
10818 }
10819
10820 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10821 u16 mru, int rxr_id)
10822 {
10823 int rc;
10824
10825 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10826 return 0;
10827
10828 if (mru) {
10829 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10830 if (rc) {
10831 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10832 vnic->vnic_id, rc);
10833 return rc;
10834 }
10835 }
10836 vnic->mru = mru;
10837 bnxt_hwrm_vnic_update(bp, vnic,
10838 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10839
10840 return 0;
10841 }
10842
10843 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10844 {
10845 struct ethtool_rxfh_context *ctx;
10846 unsigned long context;
10847 int rc;
10848
10849 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10850 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10851 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10852
10853 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10854 if (rc)
10855 return rc;
10856 }
10857
10858 return 0;
10859 }
10860
10861 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10862 {
10863 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10864 struct ethtool_rxfh_context *ctx;
10865 unsigned long context;
10866
10867 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10868 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10869 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10870
10871 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10872 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10873 __bnxt_setup_vnic_p5(bp, vnic)) {
10874 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10875 rss_ctx->index);
10876 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10877 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10878 }
10879 }
10880 }
10881
10882 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10883 {
10884 struct ethtool_rxfh_context *ctx;
10885 unsigned long context;
10886
10887 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10888 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10889
10890 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10891 }
10892 }
10893
10894 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10895 static bool bnxt_promisc_ok(struct bnxt *bp)
10896 {
10897 #ifdef CONFIG_BNXT_SRIOV
10898 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10899 return false;
10900 #endif
10901 return true;
10902 }
10903
10904 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10905 {
10906 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10907 int rc = 0;
10908
10909 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10910 if (rc) {
10911 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10912 rc);
10913 return rc;
10914 }
10915
10916 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10917 if (rc) {
10918 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10919 rc);
10920 return rc;
10921 }
10922 return rc;
10923 }
10924
10925 static int bnxt_cfg_rx_mode(struct bnxt *);
10926 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10927
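/* Bring up the datapath in firmware: stat contexts, rings, ring groups
 * and the default VNIC with its L2 filter and RX mask, plus optional
 * RFS VNICs, TPA and interrupt coalescing.  On error, all firmware
 * resources allocated here are freed again.
 */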
10928 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10929 {
10930 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10931 int rc = 0;
10932 unsigned int rx_nr_rings = bp->rx_nr_rings;
10933
10934 if (irq_re_init) {
10935 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10936 if (rc) {
10937 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10938 rc);
10939 goto err_out;
10940 }
10941 }
10942
10943 rc = bnxt_hwrm_ring_alloc(bp);
10944 if (rc) {
10945 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10946 goto err_out;
10947 }
10948
10949 rc = bnxt_hwrm_ring_grp_alloc(bp);
10950 if (rc) {
10951 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10952 goto err_out;
10953 }
10954
10955 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10956 rx_nr_rings--;
10957
10958 /* default vnic 0 */
10959 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10960 if (rc) {
10961 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10962 goto err_out;
10963 }
10964
10965 if (BNXT_VF(bp))
10966 bnxt_hwrm_func_qcfg(bp);
10967
10968 rc = bnxt_setup_vnic(bp, vnic);
10969 if (rc)
10970 goto err_out;
10971 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10972 bnxt_hwrm_update_rss_hash_cfg(bp);
10973
10974 if (bp->flags & BNXT_FLAG_RFS) {
10975 rc = bnxt_alloc_rfs_vnics(bp);
10976 if (rc)
10977 goto err_out;
10978 }
10979
10980 if (bp->flags & BNXT_FLAG_TPA) {
10981 rc = bnxt_set_tpa(bp, true);
10982 if (rc)
10983 goto err_out;
10984 }
10985
10986 if (BNXT_VF(bp))
10987 bnxt_update_vf_mac(bp);
10988
10989 /* Filter for default vnic 0 */
10990 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10991 if (rc) {
10992 if (BNXT_VF(bp) && rc == -ENODEV)
10993 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10994 else
10995 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10996 goto err_out;
10997 }
10998 vnic->uc_filter_count = 1;
10999
11000 vnic->rx_mask = 0;
11001 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11002 goto skip_rx_mask;
11003
11004 if (bp->dev->flags & IFF_BROADCAST)
11005 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11006
11007 if (bp->dev->flags & IFF_PROMISC)
11008 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11009
11010 if (bp->dev->flags & IFF_ALLMULTI) {
11011 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11012 vnic->mc_list_count = 0;
11013 } else if (bp->dev->flags & IFF_MULTICAST) {
11014 u32 mask = 0;
11015
11016 bnxt_mc_list_updated(bp, &mask);
11017 vnic->rx_mask |= mask;
11018 }
11019
11020 rc = bnxt_cfg_rx_mode(bp);
11021 if (rc)
11022 goto err_out;
11023
11024 skip_rx_mask:
11025 rc = bnxt_hwrm_set_coal(bp);
11026 if (rc)
11027 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11028 rc);
11029
11030 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11031 rc = bnxt_setup_nitroa0_vnic(bp);
11032 if (rc)
11033 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11034 rc);
11035 }
11036
11037 if (BNXT_VF(bp)) {
11038 bnxt_hwrm_func_qcfg(bp);
11039 netdev_update_features(bp->dev);
11040 }
11041
11042 return 0;
11043
11044 err_out:
11045 bnxt_hwrm_resource_free(bp, 0, true);
11046
11047 return rc;
11048 }
11049
11050 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11051 {
11052 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11053 return 0;
11054 }
11055
11056 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11057 {
11058 bnxt_init_cp_rings(bp);
11059 bnxt_init_rx_rings(bp);
11060 bnxt_init_tx_rings(bp);
11061 bnxt_init_ring_grps(bp, irq_re_init);
11062 bnxt_init_vnics(bp);
11063
11064 return bnxt_init_chip(bp, irq_re_init);
11065 }
11066
11067 static int bnxt_set_real_num_queues(struct bnxt *bp)
11068 {
11069 int rc;
11070 struct net_device *dev = bp->dev;
11071
11072 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11073 bp->tx_nr_rings_xdp);
11074 if (rc)
11075 return rc;
11076
11077 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11078 if (rc)
11079 return rc;
11080
11081 #ifdef CONFIG_RFS_ACCEL
11082 if (bp->flags & BNXT_FLAG_RFS)
11083 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11084 #endif
11085
11086 return rc;
11087 }
11088
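/* Trim the requested RX and TX ring counts to fit within @max.  With
 * shared rings both counts are simply capped at @max; otherwise rings
 * are taken from the larger of the two until rx + tx <= max.
 */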
11089 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11090 bool shared)
11091 {
11092 int _rx = *rx, _tx = *tx;
11093
11094 if (shared) {
11095 *rx = min_t(int, _rx, max);
11096 *tx = min_t(int, _tx, max);
11097 } else {
11098 if (max < 2)
11099 return -ENOMEM;
11100
11101 while (_rx + _tx > max) {
11102 if (_rx > _tx && _rx > 1)
11103 _rx--;
11104 else if (_tx > 1)
11105 _tx--;
11106 }
11107 *rx = _rx;
11108 *tx = _tx;
11109 }
11110 return 0;
11111 }
11112
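/* Convert a TX ring count to the number of completion rings required,
 * given the number of TC sets and the number of XDP TX rings.
 */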
11113 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11114 {
11115 return (tx - tx_xdp) / tx_sets + tx_xdp;
11116 }
11117
11118 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11119 {
11120 int tcs = bp->num_tc;
11121
11122 if (!tcs)
11123 tcs = 1;
11124 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11125 }
11126
11127 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11128 {
11129 int tcs = bp->num_tc;
11130
11131 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11132 bp->tx_nr_rings_xdp;
11133 }
11134
11135 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11136 bool sh)
11137 {
11138 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11139
11140 if (tx_cp != *tx) {
11141 int tx_saved = tx_cp, rc;
11142
11143 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11144 if (rc)
11145 return rc;
11146 if (tx_cp != tx_saved)
11147 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11148 return 0;
11149 }
11150 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11151 }
11152
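/* Program the TC to TX queue mapping and assign a name and handler to
 * each entry in the MSI-X IRQ table.
 */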
11153 static void bnxt_setup_msix(struct bnxt *bp)
11154 {
11155 const int len = sizeof(bp->irq_tbl[0].name);
11156 struct net_device *dev = bp->dev;
11157 int tcs, i;
11158
11159 tcs = bp->num_tc;
11160 if (tcs) {
11161 int i, off, count;
11162
11163 for (i = 0; i < tcs; i++) {
11164 count = bp->tx_nr_rings_per_tc;
11165 off = BNXT_TC_TO_RING_BASE(bp, i);
11166 netdev_set_tc_queue(dev, i, count, off);
11167 }
11168 }
11169
11170 for (i = 0; i < bp->cp_nr_rings; i++) {
11171 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11172 char *attr;
11173
11174 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11175 attr = "TxRx";
11176 else if (i < bp->rx_nr_rings)
11177 attr = "rx";
11178 else
11179 attr = "tx";
11180
11181 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11182 attr, i);
11183 bp->irq_tbl[map_idx].handler = bnxt_msix;
11184 }
11185 }
11186
11187 static int bnxt_init_int_mode(struct bnxt *bp);
11188
11189 static int bnxt_change_msix(struct bnxt *bp, int total)
11190 {
11191 struct msi_map map;
11192 int i;
11193
11194 /* add MSIX to the end if needed */
11195 for (i = bp->total_irqs; i < total; i++) {
11196 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11197 if (map.index < 0)
11198 return bp->total_irqs;
11199 bp->irq_tbl[i].vector = map.virq;
11200 bp->total_irqs++;
11201 }
11202
11203 /* trim MSIX from the end if needed */
11204 for (i = bp->total_irqs; i > total; i--) {
11205 map.index = i - 1;
11206 map.virq = bp->irq_tbl[i - 1].vector;
11207 pci_msix_free_irq(bp->pdev, map);
11208 bp->total_irqs--;
11209 }
11210 return bp->total_irqs;
11211 }
11212
11213 static int bnxt_setup_int_mode(struct bnxt *bp)
11214 {
11215 int rc;
11216
11217 if (!bp->irq_tbl) {
11218 rc = bnxt_init_int_mode(bp);
11219 if (rc || !bp->irq_tbl)
11220 return rc ?: -ENODEV;
11221 }
11222
11223 bnxt_setup_msix(bp);
11224
11225 rc = bnxt_set_real_num_queues(bp);
11226 return rc;
11227 }
11228
11229 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11230 {
11231 return bp->hw_resc.max_rsscos_ctxs;
11232 }
11233
11234 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11235 {
11236 return bp->hw_resc.max_vnics;
11237 }
11238
11239 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11240 {
11241 return bp->hw_resc.max_stat_ctxs;
11242 }
11243
11244 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11245 {
11246 return bp->hw_resc.max_cp_rings;
11247 }
11248
11249 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11250 {
11251 unsigned int cp = bp->hw_resc.max_cp_rings;
11252
11253 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11254 cp -= bnxt_get_ulp_msix_num(bp);
11255
11256 return cp;
11257 }
11258
11259 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11260 {
11261 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11262
11263 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11264 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11265
11266 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11267 }
11268
11269 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11270 {
11271 bp->hw_resc.max_irqs = max_irqs;
11272 }
11273
11274 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11275 {
11276 unsigned int cp;
11277
11278 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11279 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11280 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11281 else
11282 return cp - bp->cp_nr_rings;
11283 }
11284
11285 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11286 {
11287 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11288 }
11289
11290 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11291 {
11292 int max_irq = bnxt_get_max_func_irqs(bp);
11293 int total_req = bp->cp_nr_rings + num;
11294
11295 if (max_irq < total_req) {
11296 num = max_irq - bp->cp_nr_rings;
11297 if (num <= 0)
11298 return 0;
11299 }
11300 return num;
11301 }
11302
11303 static int bnxt_get_num_msix(struct bnxt *bp)
11304 {
11305 if (!BNXT_NEW_RM(bp))
11306 return bnxt_get_max_func_irqs(bp);
11307
11308 return bnxt_nq_rings_in_use(bp);
11309 }
11310
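/* Allocate MSI-X vectors and the IRQ table, then trim the RX/TX ring
 * counts to fit the vectors actually granted, excluding those reserved
 * for the ULP driver.
 */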
11311 static int bnxt_init_int_mode(struct bnxt *bp)
11312 {
11313 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11314
11315 total_vecs = bnxt_get_num_msix(bp);
11316 max = bnxt_get_max_func_irqs(bp);
11317 if (total_vecs > max)
11318 total_vecs = max;
11319
11320 if (!total_vecs)
11321 return 0;
11322
11323 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11324 min = 2;
11325
11326 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11327 PCI_IRQ_MSIX);
11328 ulp_msix = bnxt_get_ulp_msix_num(bp);
11329 if (total_vecs < 0 || total_vecs < ulp_msix) {
11330 rc = -ENODEV;
11331 goto msix_setup_exit;
11332 }
11333
11334 tbl_size = total_vecs;
11335 if (pci_msix_can_alloc_dyn(bp->pdev))
11336 tbl_size = max;
11337 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11338 if (bp->irq_tbl) {
11339 for (i = 0; i < total_vecs; i++)
11340 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11341
11342 bp->total_irqs = total_vecs;
11343 /* Trim rings based upon num of vectors allocated */
11344 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11345 total_vecs - ulp_msix, min == 1);
11346 if (rc)
11347 goto msix_setup_exit;
11348
11349 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11350 bp->cp_nr_rings = (min == 1) ?
11351 max_t(int, tx_cp, bp->rx_nr_rings) :
11352 tx_cp + bp->rx_nr_rings;
11353
11354 } else {
11355 rc = -ENOMEM;
11356 goto msix_setup_exit;
11357 }
11358 return 0;
11359
11360 msix_setup_exit:
11361 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11362 kfree(bp->irq_tbl);
11363 bp->irq_tbl = NULL;
11364 pci_free_irq_vectors(bp->pdev);
11365 return rc;
11366 }
11367
11368 static void bnxt_clear_int_mode(struct bnxt *bp)
11369 {
11370 pci_free_irq_vectors(bp->pdev);
11371
11372 kfree(bp->irq_tbl);
11373 bp->irq_tbl = NULL;
11374 }
11375
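/* Reserve rings (and, with the new resource manager, MSI-X vectors)
 * with firmware.  If the required IRQ count has changed and vectors
 * cannot be added or removed dynamically, the interrupt mode is torn
 * down and reinitialized around the reservation.
 */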
11376 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11377 {
11378 bool irq_cleared = false;
11379 bool irq_change = false;
11380 int tcs = bp->num_tc;
11381 int irqs_required;
11382 int rc;
11383
11384 if (!bnxt_need_reserve_rings(bp))
11385 return 0;
11386
11387 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11388 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11389
11390 if (ulp_msix > bp->ulp_num_msix_want)
11391 ulp_msix = bp->ulp_num_msix_want;
11392 irqs_required = ulp_msix + bp->cp_nr_rings;
11393 } else {
11394 irqs_required = bnxt_get_num_msix(bp);
11395 }
11396
11397 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11398 irq_change = true;
11399 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11400 bnxt_ulp_irq_stop(bp);
11401 bnxt_clear_int_mode(bp);
11402 irq_cleared = true;
11403 }
11404 }
11405 rc = __bnxt_reserve_rings(bp);
11406 if (irq_cleared) {
11407 if (!rc)
11408 rc = bnxt_init_int_mode(bp);
11409 bnxt_ulp_irq_restart(bp, rc);
11410 } else if (irq_change && !rc) {
11411 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11412 rc = -ENOSPC;
11413 }
11414 if (rc) {
11415 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11416 return rc;
11417 }
11418 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11419 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11420 netdev_err(bp->dev, "tx ring reservation failure\n");
11421 netdev_reset_tc(bp->dev);
11422 bp->num_tc = 0;
11423 if (bp->tx_nr_rings_xdp)
11424 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11425 else
11426 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11427 return -ENOMEM;
11428 }
11429 return 0;
11430 }
11431
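/* Quiesce the TX rings of one NAPI instance: mark them as closing and
 * stop the corresponding netdev TX queues.  In TPH mode the TX and
 * completion rings are also freed in firmware so that they can be
 * reallocated when the queue is restarted.
 */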
11432 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11433 {
11434 struct bnxt_tx_ring_info *txr;
11435 struct netdev_queue *txq;
11436 struct bnxt_napi *bnapi;
11437 int i;
11438
11439 bnapi = bp->bnapi[idx];
11440 bnxt_for_each_napi_tx(i, bnapi, txr) {
11441 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11442 synchronize_net();
11443
11444 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11445 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11446 if (txq) {
11447 __netif_tx_lock_bh(txq);
11448 netif_tx_stop_queue(txq);
11449 __netif_tx_unlock_bh(txq);
11450 }
11451 }
11452
11453 if (!bp->tph_mode)
11454 continue;
11455
11456 bnxt_hwrm_tx_ring_free(bp, txr, true);
11457 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11458 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11459 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11460 }
11461 }
11462
11463 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11464 {
11465 struct bnxt_tx_ring_info *txr;
11466 struct netdev_queue *txq;
11467 struct bnxt_napi *bnapi;
11468 int rc, i;
11469
11470 bnapi = bp->bnapi[idx];
11471 /* All rings have been reserved and previously allocated.
11472 * Reallocating with the same parameters should never fail.
11473 */
11474 bnxt_for_each_napi_tx(i, bnapi, txr) {
11475 if (!bp->tph_mode)
11476 goto start_tx;
11477
11478 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11479 if (rc)
11480 return rc;
11481
11482 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11483 if (rc)
11484 return rc;
11485
11486 txr->tx_prod = 0;
11487 txr->tx_cons = 0;
11488 txr->tx_hw_cons = 0;
11489 start_tx:
11490 WRITE_ONCE(txr->dev_state, 0);
11491 synchronize_net();
11492
11493 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11494 continue;
11495
11496 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11497 if (txq)
11498 netif_tx_start_queue(txq);
11499 }
11500
11501 return 0;
11502 }
11503
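/* IRQ affinity notifier used when TPH is enabled: refresh the steering
 * tag for the first CPU in the new mask, then restart the affected RX
 * queue.
 */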
11504 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11505 const cpumask_t *mask)
11506 {
11507 struct bnxt_irq *irq;
11508 u16 tag;
11509 int err;
11510
11511 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11512
11513 if (!irq->bp->tph_mode)
11514 return;
11515
11516 cpumask_copy(irq->cpu_mask, mask);
11517
11518 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11519 return;
11520
11521 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11522 cpumask_first(irq->cpu_mask), &tag))
11523 return;
11524
11525 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11526 return;
11527
11528 netdev_lock(irq->bp->dev);
11529 if (netif_running(irq->bp->dev)) {
11530 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11531 if (err)
11532 netdev_err(irq->bp->dev,
11533 "RX queue restart failed: err=%d\n", err);
11534 }
11535 netdev_unlock(irq->bp->dev);
11536 }
11537
11538 static void bnxt_irq_affinity_release(struct kref *ref)
11539 {
11540 struct irq_affinity_notify *notify =
11541 container_of(ref, struct irq_affinity_notify, kref);
11542 struct bnxt_irq *irq;
11543
11544 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11545
11546 if (!irq->bp->tph_mode)
11547 return;
11548
11549 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11550 netdev_err(irq->bp->dev,
11551 "Setting ST=0 for MSIX entry %d failed\n",
11552 irq->msix_nr);
11553 return;
11554 }
11555 }
11556
11557 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11558 {
11559 irq_set_affinity_notifier(irq->vector, NULL);
11560 }
11561
11562 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11563 {
11564 struct irq_affinity_notify *notify;
11565
11566 irq->bp = bp;
11567
11568 /* Nothing to do if TPH is not enabled */
11569 if (!bp->tph_mode)
11570 return;
11571
11572 /* Register IRQ affinity notifier */
11573 notify = &irq->affinity_notify;
11574 notify->irq = irq->vector;
11575 notify->notify = bnxt_irq_affinity_notify;
11576 notify->release = bnxt_irq_affinity_release;
11577
11578 irq_set_affinity_notifier(irq->vector, notify);
11579 }
11580
11581 static void bnxt_free_irq(struct bnxt *bp)
11582 {
11583 struct bnxt_irq *irq;
11584 int i;
11585
11586 #ifdef CONFIG_RFS_ACCEL
11587 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11588 bp->dev->rx_cpu_rmap = NULL;
11589 #endif
11590 if (!bp->irq_tbl || !bp->bnapi)
11591 return;
11592
11593 for (i = 0; i < bp->cp_nr_rings; i++) {
11594 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11595
11596 irq = &bp->irq_tbl[map_idx];
11597 if (irq->requested) {
11598 if (irq->have_cpumask) {
11599 irq_update_affinity_hint(irq->vector, NULL);
11600 free_cpumask_var(irq->cpu_mask);
11601 irq->have_cpumask = 0;
11602 }
11603
11604 bnxt_release_irq_notifier(irq);
11605
11606 free_irq(irq->vector, bp->bnapi[i]);
11607 }
11608
11609 irq->requested = 0;
11610 }
11611
11612 /* Disable TPH support */
11613 pcie_disable_tph(bp->pdev);
11614 bp->tph_mode = 0;
11615 }
11616
11617 static int bnxt_request_irq(struct bnxt *bp)
11618 {
11619 struct cpu_rmap *rmap = NULL;
11620 int i, j, rc = 0;
11621 unsigned long flags = 0;
11622
11623 rc = bnxt_setup_int_mode(bp);
11624 if (rc) {
11625 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11626 rc);
11627 return rc;
11628 }
11629 #ifdef CONFIG_RFS_ACCEL
11630 rmap = bp->dev->rx_cpu_rmap;
11631 #endif
11632
11633 /* Enable TPH support as part of IRQ request */
11634 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11635 if (!rc)
11636 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11637
11638 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11639 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11640 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11641
11642 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11643 rmap && bp->bnapi[i]->rx_ring) {
11644 rc = irq_cpu_rmap_add(rmap, irq->vector);
11645 if (rc)
11646 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11647 j);
11648 j++;
11649 }
11650
11651 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11652 bp->bnapi[i]);
11653 if (rc)
11654 break;
11655
11656 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11657 irq->requested = 1;
11658
11659 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11660 int numa_node = dev_to_node(&bp->pdev->dev);
11661 u16 tag;
11662
11663 irq->have_cpumask = 1;
11664 irq->msix_nr = map_idx;
11665 irq->ring_nr = i;
11666 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11667 irq->cpu_mask);
11668 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11669 if (rc) {
11670 netdev_warn(bp->dev,
11671 "Update affinity hint failed, IRQ = %d\n",
11672 irq->vector);
11673 break;
11674 }
11675
11676 bnxt_register_irq_notifier(bp, irq);
11677
11678 /* Init ST table entry */
11679 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11680 cpumask_first(irq->cpu_mask),
11681 &tag))
11682 continue;
11683
11684 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11685 }
11686 }
11687 return rc;
11688 }
11689
11690 static void bnxt_del_napi(struct bnxt *bp)
11691 {
11692 int i;
11693
11694 if (!bp->bnapi)
11695 return;
11696
11697 for (i = 0; i < bp->rx_nr_rings; i++)
11698 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11699 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11700 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11701
11702 for (i = 0; i < bp->cp_nr_rings; i++) {
11703 struct bnxt_napi *bnapi = bp->bnapi[i];
11704
11705 __netif_napi_del_locked(&bnapi->napi);
11706 }
11707 /* We called __netif_napi_del_locked(), so we need to respect
11708 * an RCU grace period before freeing the napi structures.
11709 */
11710 synchronize_net();
11711 }
11712
11713 static void bnxt_init_napi(struct bnxt *bp)
11714 {
11715 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11716 unsigned int cp_nr_rings = bp->cp_nr_rings;
11717 struct bnxt_napi *bnapi;
11718 int i;
11719
11720 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11721 poll_fn = bnxt_poll_p5;
11722 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11723 cp_nr_rings--;
11724
11725 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11726
11727 for (i = 0; i < cp_nr_rings; i++) {
11728 bnapi = bp->bnapi[i];
11729 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11730 bnapi->index);
11731 }
11732 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11733 bnapi = bp->bnapi[cp_nr_rings];
11734 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11735 }
11736 }
11737
11738 static void bnxt_disable_napi(struct bnxt *bp)
11739 {
11740 int i;
11741
11742 if (!bp->bnapi ||
11743 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11744 return;
11745
11746 for (i = 0; i < bp->cp_nr_rings; i++) {
11747 struct bnxt_napi *bnapi = bp->bnapi[i];
11748 struct bnxt_cp_ring_info *cpr;
11749
11750 cpr = &bnapi->cp_ring;
11751 if (bnapi->tx_fault)
11752 cpr->sw_stats->tx.tx_resets++;
11753 if (bnapi->in_reset)
11754 cpr->sw_stats->rx.rx_resets++;
11755 napi_disable_locked(&bnapi->napi);
11756 }
11757 }
11758
11759 static void bnxt_enable_napi(struct bnxt *bp)
11760 {
11761 int i;
11762
11763 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11764 for (i = 0; i < bp->cp_nr_rings; i++) {
11765 struct bnxt_napi *bnapi = bp->bnapi[i];
11766 struct bnxt_cp_ring_info *cpr;
11767
11768 bnapi->tx_fault = 0;
11769
11770 cpr = &bnapi->cp_ring;
11771 bnapi->in_reset = false;
11772
11773 if (bnapi->rx_ring) {
11774 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11775 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11776 }
11777 napi_enable_locked(&bnapi->napi);
11778 }
11779 }
11780
11781 void bnxt_tx_disable(struct bnxt *bp)
11782 {
11783 int i;
11784 struct bnxt_tx_ring_info *txr;
11785
11786 if (bp->tx_ring) {
11787 for (i = 0; i < bp->tx_nr_rings; i++) {
11788 txr = &bp->tx_ring[i];
11789 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11790 }
11791 }
11792 /* Make sure napi polls see @dev_state change */
11793 synchronize_net();
11794 /* Drop carrier first to prevent TX timeout */
11795 netif_carrier_off(bp->dev);
11796 /* Stop all TX queues */
11797 netif_tx_disable(bp->dev);
11798 }
11799
11800 void bnxt_tx_enable(struct bnxt *bp)
11801 {
11802 int i;
11803 struct bnxt_tx_ring_info *txr;
11804
11805 for (i = 0; i < bp->tx_nr_rings; i++) {
11806 txr = &bp->tx_ring[i];
11807 WRITE_ONCE(txr->dev_state, 0);
11808 }
11809 /* Make sure napi polls see @dev_state change */
11810 synchronize_net();
11811 netif_tx_wake_all_queues(bp->dev);
11812 if (BNXT_LINK_IS_UP(bp))
11813 netif_carrier_on(bp->dev);
11814 }
11815
11816 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11817 {
11818 u8 active_fec = link_info->active_fec_sig_mode &
11819 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11820
11821 switch (active_fec) {
11822 default:
11823 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11824 return "None";
11825 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11826 return "Clause 74 BaseR";
11827 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11828 return "Clause 91 RS(528,514)";
11829 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11830 return "Clause 91 RS544_1XN";
11831 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11832 return "Clause 91 RS(544,514)";
11833 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11834 return "Clause 91 RS272_1XN";
11835 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11836 return "Clause 91 RS(272,257)";
11837 }
11838 }
11839
11840 void bnxt_report_link(struct bnxt *bp)
11841 {
11842 if (BNXT_LINK_IS_UP(bp)) {
11843 const char *signal = "";
11844 const char *flow_ctrl;
11845 const char *duplex;
11846 u32 speed;
11847 u16 fec;
11848
11849 netif_carrier_on(bp->dev);
11850 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11851 if (speed == SPEED_UNKNOWN) {
11852 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11853 return;
11854 }
11855 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11856 duplex = "full";
11857 else
11858 duplex = "half";
11859 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11860 flow_ctrl = "ON - receive & transmit";
11861 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11862 flow_ctrl = "ON - transmit";
11863 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11864 flow_ctrl = "ON - receive";
11865 else
11866 flow_ctrl = "none";
11867 if (bp->link_info.phy_qcfg_resp.option_flags &
11868 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11869 u8 sig_mode = bp->link_info.active_fec_sig_mode &
11870 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11871 switch (sig_mode) {
11872 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11873 signal = "(NRZ) ";
11874 break;
11875 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11876 signal = "(PAM4 56Gbps) ";
11877 break;
11878 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11879 signal = "(PAM4 112Gbps) ";
11880 break;
11881 default:
11882 break;
11883 }
11884 }
11885 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11886 speed, signal, duplex, flow_ctrl);
11887 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11888 netdev_info(bp->dev, "EEE is %s\n",
11889 bp->eee.eee_active ? "active" :
11890 "not active");
11891 fec = bp->link_info.fec_cfg;
11892 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11893 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11894 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11895 bnxt_report_fec(&bp->link_info));
11896 } else {
11897 netif_carrier_off(bp->dev);
11898 netdev_err(bp->dev, "NIC Link is Down\n");
11899 }
11900 }
11901
11902 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11903 {
11904 if (!resp->supported_speeds_auto_mode &&
11905 !resp->supported_speeds_force_mode &&
11906 !resp->supported_pam4_speeds_auto_mode &&
11907 !resp->supported_pam4_speeds_force_mode &&
11908 !resp->supported_speeds2_auto_mode &&
11909 !resp->supported_speeds2_force_mode)
11910 return true;
11911 return false;
11912 }
11913
11914 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11915 {
11916 struct bnxt_link_info *link_info = &bp->link_info;
11917 struct hwrm_port_phy_qcaps_output *resp;
11918 struct hwrm_port_phy_qcaps_input *req;
11919 int rc = 0;
11920
11921 if (bp->hwrm_spec_code < 0x10201)
11922 return 0;
11923
11924 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11925 if (rc)
11926 return rc;
11927
11928 resp = hwrm_req_hold(bp, req);
11929 rc = hwrm_req_send(bp, req);
11930 if (rc)
11931 goto hwrm_phy_qcaps_exit;
11932
11933 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11934 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11935 struct ethtool_keee *eee = &bp->eee;
11936 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11937
11938 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11939 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11940 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11941 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11942 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11943 }
11944
11945 if (bp->hwrm_spec_code >= 0x10a01) {
11946 if (bnxt_phy_qcaps_no_speed(resp)) {
11947 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11948 netdev_warn(bp->dev, "Ethernet link disabled\n");
11949 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11950 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11951 netdev_info(bp->dev, "Ethernet link enabled\n");
11952 /* Phy re-enabled, reprobe the speeds */
11953 link_info->support_auto_speeds = 0;
11954 link_info->support_pam4_auto_speeds = 0;
11955 link_info->support_auto_speeds2 = 0;
11956 }
11957 }
11958 if (resp->supported_speeds_auto_mode)
11959 link_info->support_auto_speeds =
11960 le16_to_cpu(resp->supported_speeds_auto_mode);
11961 if (resp->supported_pam4_speeds_auto_mode)
11962 link_info->support_pam4_auto_speeds =
11963 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11964 if (resp->supported_speeds2_auto_mode)
11965 link_info->support_auto_speeds2 =
11966 le16_to_cpu(resp->supported_speeds2_auto_mode);
11967
11968 bp->port_count = resp->port_cnt;
11969
11970 hwrm_phy_qcaps_exit:
11971 hwrm_req_drop(bp, req);
11972 return rc;
11973 }
11974
11975 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
11976 {
11977 struct hwrm_port_mac_qcaps_output *resp;
11978 struct hwrm_port_mac_qcaps_input *req;
11979 int rc;
11980
11981 if (bp->hwrm_spec_code < 0x10a03)
11982 return;
11983
11984 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
11985 if (rc)
11986 return;
11987
11988 resp = hwrm_req_hold(bp, req);
11989 rc = hwrm_req_send_silent(bp, req);
11990 if (!rc)
11991 bp->mac_flags = resp->flags;
11992 hwrm_req_drop(bp, req);
11993 }
11994
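/* Return true if @advertising contains any speed not present in
 * @supported.
 */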
11995 static bool bnxt_support_dropped(u16 advertising, u16 supported)
11996 {
11997 u16 diff = advertising ^ supported;
11998
11999 return ((supported | diff) != supported);
12000 }
12001
12002 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12003 {
12004 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12005
12006 /* Check if any advertised speeds are no longer supported. The caller
12007 * holds the link_lock mutex, so we can modify link_info settings.
12008 */
12009 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12010 if (bnxt_support_dropped(link_info->advertising,
12011 link_info->support_auto_speeds2)) {
12012 link_info->advertising = link_info->support_auto_speeds2;
12013 return true;
12014 }
12015 return false;
12016 }
12017 if (bnxt_support_dropped(link_info->advertising,
12018 link_info->support_auto_speeds)) {
12019 link_info->advertising = link_info->support_auto_speeds;
12020 return true;
12021 }
12022 if (bnxt_support_dropped(link_info->advertising_pam4,
12023 link_info->support_pam4_auto_speeds)) {
12024 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12025 return true;
12026 }
12027 return false;
12028 }
12029
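/* Query PORT_PHY_QCFG and refresh the cached link, pause, FEC and EEE
 * state.  If @chng_link_state is set, report link state transitions;
 * if previously advertised speeds are no longer supported, the
 * advertisement is trimmed and, with speed autoneg enabled, re-applied.
 */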
12030 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12031 {
12032 struct bnxt_link_info *link_info = &bp->link_info;
12033 struct hwrm_port_phy_qcfg_output *resp;
12034 struct hwrm_port_phy_qcfg_input *req;
12035 u8 link_state = link_info->link_state;
12036 bool support_changed;
12037 int rc;
12038
12039 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12040 if (rc)
12041 return rc;
12042
12043 resp = hwrm_req_hold(bp, req);
12044 rc = hwrm_req_send(bp, req);
12045 if (rc) {
12046 hwrm_req_drop(bp, req);
12047 if (BNXT_VF(bp) && rc == -ENODEV) {
12048 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12049 rc = 0;
12050 }
12051 return rc;
12052 }
12053
12054 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12055 link_info->phy_link_status = resp->link;
12056 link_info->duplex = resp->duplex_cfg;
12057 if (bp->hwrm_spec_code >= 0x10800)
12058 link_info->duplex = resp->duplex_state;
12059 link_info->pause = resp->pause;
12060 link_info->auto_mode = resp->auto_mode;
12061 link_info->auto_pause_setting = resp->auto_pause;
12062 link_info->lp_pause = resp->link_partner_adv_pause;
12063 link_info->force_pause_setting = resp->force_pause;
12064 link_info->duplex_setting = resp->duplex_cfg;
12065 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12066 link_info->link_speed = le16_to_cpu(resp->link_speed);
12067 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12068 link_info->active_lanes = resp->active_lanes;
12069 } else {
12070 link_info->link_speed = 0;
12071 link_info->active_lanes = 0;
12072 }
12073 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12074 link_info->force_pam4_link_speed =
12075 le16_to_cpu(resp->force_pam4_link_speed);
12076 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12077 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12078 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12079 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12080 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12081 link_info->auto_pam4_link_speeds =
12082 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12083 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12084 link_info->lp_auto_link_speeds =
12085 le16_to_cpu(resp->link_partner_adv_speeds);
12086 link_info->lp_auto_pam4_link_speeds =
12087 resp->link_partner_pam4_adv_speeds;
12088 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12089 link_info->phy_ver[0] = resp->phy_maj;
12090 link_info->phy_ver[1] = resp->phy_min;
12091 link_info->phy_ver[2] = resp->phy_bld;
12092 link_info->media_type = resp->media_type;
12093 link_info->phy_type = resp->phy_type;
12094 link_info->transceiver = resp->xcvr_pkg_type;
12095 link_info->phy_addr = resp->eee_config_phy_addr &
12096 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12097 link_info->module_status = resp->module_status;
12098
12099 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12100 struct ethtool_keee *eee = &bp->eee;
12101 u16 fw_speeds;
12102
12103 eee->eee_active = 0;
12104 if (resp->eee_config_phy_addr &
12105 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12106 eee->eee_active = 1;
12107 fw_speeds = le16_to_cpu(
12108 resp->link_partner_adv_eee_link_speed_mask);
12109 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12110 }
12111
12112 /* Pull initial EEE config */
12113 if (!chng_link_state) {
12114 if (resp->eee_config_phy_addr &
12115 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12116 eee->eee_enabled = 1;
12117
12118 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12119 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12120
12121 if (resp->eee_config_phy_addr &
12122 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12123 __le32 tmr;
12124
12125 eee->tx_lpi_enabled = 1;
12126 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12127 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12128 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12129 }
12130 }
12131 }
12132
12133 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12134 if (bp->hwrm_spec_code >= 0x10504) {
12135 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12136 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12137 }
12138 /* TODO: need to add more logic to report VF link */
12139 if (chng_link_state) {
12140 if (link_info->phy_link_status == BNXT_LINK_LINK)
12141 link_info->link_state = BNXT_LINK_STATE_UP;
12142 else
12143 link_info->link_state = BNXT_LINK_STATE_DOWN;
12144 if (link_state != link_info->link_state)
12145 bnxt_report_link(bp);
12146 } else {
12147 /* always report link down if not required to update link state */
12148 link_info->link_state = BNXT_LINK_STATE_DOWN;
12149 }
12150 hwrm_req_drop(bp, req);
12151
12152 if (!BNXT_PHY_CFG_ABLE(bp))
12153 return 0;
12154
12155 support_changed = bnxt_support_speed_dropped(link_info);
12156 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12157 bnxt_hwrm_set_link_setting(bp, true, false);
12158 return 0;
12159 }
12160
12161 static void bnxt_get_port_module_status(struct bnxt *bp)
12162 {
12163 struct bnxt_link_info *link_info = &bp->link_info;
12164 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12165 u8 module_status;
12166
12167 if (bnxt_update_link(bp, true))
12168 return;
12169
12170 module_status = link_info->module_status;
12171 switch (module_status) {
12172 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12173 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12174 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12175 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12176 bp->pf.port_id);
12177 if (bp->hwrm_spec_code >= 0x10201) {
12178 netdev_warn(bp->dev, "Module part number %s\n",
12179 resp->phy_vendor_partnumber);
12180 }
12181 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12182 netdev_warn(bp->dev, "TX is disabled\n");
12183 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12184 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12185 }
12186 }
12187
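/* Fill in the pause fields of a PORT_PHY_CFG request from the requested
 * flow control settings, using autonegotiated pause when flow control
 * autoneg is enabled and forced pause otherwise.
 */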
12188 static void
12189 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12190 {
12191 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12192 if (bp->hwrm_spec_code >= 0x10201)
12193 req->auto_pause =
12194 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12195 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12196 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12197 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12198 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12199 req->enables |=
12200 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12201 } else {
12202 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12203 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12204 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12205 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12206 req->enables |=
12207 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12208 if (bp->hwrm_spec_code >= 0x10201) {
12209 req->auto_pause = req->force_pause;
12210 req->enables |= cpu_to_le32(
12211 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12212 }
12213 }
12214 }
12215
12216 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12217 {
12218 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12219 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12220 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12221 req->enables |=
12222 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12223 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12224 } else if (bp->link_info.advertising) {
12225 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12226 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12227 }
12228 if (bp->link_info.advertising_pam4) {
12229 req->enables |=
12230 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12231 req->auto_link_pam4_speed_mask =
12232 cpu_to_le16(bp->link_info.advertising_pam4);
12233 }
12234 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12235 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12236 } else {
12237 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12238 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12239 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12240 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12241 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12242 (u32)bp->link_info.req_link_speed);
12243 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12244 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12245 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12246 } else {
12247 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12248 }
12249 }
12250
12251 /* tell chimp that the setting takes effect immediately */
12252 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12253 }
12254
12255 int bnxt_hwrm_set_pause(struct bnxt *bp)
12256 {
12257 struct hwrm_port_phy_cfg_input *req;
12258 int rc;
12259
12260 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12261 if (rc)
12262 return rc;
12263
12264 bnxt_hwrm_set_pause_common(bp, req);
12265
12266 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12267 bp->link_info.force_link_chng)
12268 bnxt_hwrm_set_link_common(bp, req);
12269
12270 rc = hwrm_req_send(bp, req);
12271 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12272 /* Since changing the pause setting doesn't trigger any link
12273 * change event, the driver needs to update the current pause
12274 * result upon successful return of the phy_cfg command.
12275 */
12276 bp->link_info.pause =
12277 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12278 bp->link_info.auto_pause_setting = 0;
12279 if (!bp->link_info.force_link_chng)
12280 bnxt_report_link(bp);
12281 }
12282 bp->link_info.force_link_chng = false;
12283 return rc;
12284 }
12285
12286 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12287 struct hwrm_port_phy_cfg_input *req)
12288 {
12289 struct ethtool_keee *eee = &bp->eee;
12290
12291 if (eee->eee_enabled) {
12292 u16 eee_speeds;
12293 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12294
12295 if (eee->tx_lpi_enabled)
12296 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12297 else
12298 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12299
12300 req->flags |= cpu_to_le32(flags);
12301 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12302 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12303 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12304 } else {
12305 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12306 }
12307 }
12308
12309 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12310 {
12311 struct hwrm_port_phy_cfg_input *req;
12312 int rc;
12313
12314 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12315 if (rc)
12316 return rc;
12317
12318 if (set_pause)
12319 bnxt_hwrm_set_pause_common(bp, req);
12320
12321 bnxt_hwrm_set_link_common(bp, req);
12322
12323 if (set_eee)
12324 bnxt_hwrm_set_eee(bp, req);
12325 return hwrm_req_send(bp, req);
12326 }
12327
12328 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12329 {
12330 struct hwrm_port_phy_cfg_input *req;
12331 int rc;
12332
12333 if (!BNXT_SINGLE_PF(bp))
12334 return 0;
12335
12336 if (pci_num_vf(bp->pdev) &&
12337 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12338 return 0;
12339
12340 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12341 if (rc)
12342 return rc;
12343
12344 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12345 rc = hwrm_req_send(bp, req);
12346 if (!rc) {
12347 mutex_lock(&bp->link_lock);
12348 /* The device is not obliged to bring the link down in certain
12349 * scenarios, even when forced. Setting the state to unknown is
12350 * consistent with driver startup and will force the link state to
12351 * be reported during a subsequent open based on PORT_PHY_QCFG.
12352 */
12353 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12354 mutex_unlock(&bp->link_lock);
12355 }
12356 return rc;
12357 }
12358
12359 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12360 {
12361 #ifdef CONFIG_TEE_BNXT_FW
12362 int rc = tee_bnxt_fw_load();
12363
12364 if (rc)
12365 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12366
12367 return rc;
12368 #else
12369 netdev_err(bp->dev, "OP-TEE not supported\n");
12370 return -ENODEV;
12371 #endif
12372 }
12373
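/* Poll the firmware health status register and wait for the firmware to
 * finish booting or recovering. If the firmware crashed and there is no
 * master function left to reset it, request a reset via OP-TEE instead.
 */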
12374 static int bnxt_try_recover_fw(struct bnxt *bp)
12375 {
12376 if (bp->fw_health && bp->fw_health->status_reliable) {
12377 int retry = 0, rc;
12378 u32 sts;
12379
12380 do {
12381 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12382 rc = bnxt_hwrm_poll(bp);
12383 if (!BNXT_FW_IS_BOOTING(sts) &&
12384 !BNXT_FW_IS_RECOVERING(sts))
12385 break;
12386 retry++;
12387 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12388
12389 if (!BNXT_FW_IS_HEALTHY(sts)) {
12390 netdev_err(bp->dev,
12391 "Firmware not responding, status: 0x%x\n",
12392 sts);
12393 rc = -ENODEV;
12394 }
12395 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12396 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12397 return bnxt_fw_reset_via_optee(bp);
12398 }
12399 return rc;
12400 }
12401
12402 return -ENODEV;
12403 }
12404
12405 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12406 {
12407 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12408
12409 if (!BNXT_NEW_RM(bp))
12410 return; /* no resource reservations required */
12411
12412 hw_resc->resv_cp_rings = 0;
12413 hw_resc->resv_stat_ctxs = 0;
12414 hw_resc->resv_irqs = 0;
12415 hw_resc->resv_tx_rings = 0;
12416 hw_resc->resv_rx_rings = 0;
12417 hw_resc->resv_hw_ring_grps = 0;
12418 hw_resc->resv_vnics = 0;
12419 hw_resc->resv_rsscos_ctxs = 0;
12420 if (!fw_reset) {
12421 bp->tx_nr_rings = 0;
12422 bp->rx_nr_rings = 0;
12423 }
12424 }
12425
12426 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12427 {
12428 int rc;
12429
12430 if (!BNXT_NEW_RM(bp))
12431 return 0; /* no resource reservations required */
12432
12433 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12434 if (rc)
12435 netdev_err(bp->dev, "resc_qcaps failed\n");
12436
12437 bnxt_clear_reservations(bp, fw_reset);
12438
12439 return rc;
12440 }
12441
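/* Notify the firmware that the interface is going up or down. On the "up"
 * transition, inspect the response flags for resource changes, a completed
 * hot firmware reset, or capability changes, and re-run firmware init and
 * cancel stale resource reservations when needed.
 */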
12442 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12443 {
12444 struct hwrm_func_drv_if_change_output *resp;
12445 struct hwrm_func_drv_if_change_input *req;
12446 bool resc_reinit = false;
12447 bool caps_change = false;
12448 int rc, retry = 0;
12449 bool fw_reset;
12450 u32 flags = 0;
12451
12452 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12453 bp->fw_reset_state = 0;
12454
12455 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12456 return 0;
12457
12458 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12459 if (rc)
12460 return rc;
12461
12462 if (up)
12463 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12464 resp = hwrm_req_hold(bp, req);
12465
12466 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12467 while (retry < BNXT_FW_IF_RETRY) {
12468 rc = hwrm_req_send(bp, req);
12469 if (rc != -EAGAIN)
12470 break;
12471
12472 msleep(50);
12473 retry++;
12474 }
12475
12476 if (rc == -EAGAIN) {
12477 hwrm_req_drop(bp, req);
12478 return rc;
12479 } else if (!rc) {
12480 flags = le32_to_cpu(resp->flags);
12481 } else if (up) {
12482 rc = bnxt_try_recover_fw(bp);
12483 fw_reset = true;
12484 }
12485 hwrm_req_drop(bp, req);
12486 if (rc)
12487 return rc;
12488
12489 if (!up) {
12490 bnxt_inv_fw_health_reg(bp);
12491 return 0;
12492 }
12493
12494 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12495 resc_reinit = true;
12496 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12497 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12498 fw_reset = true;
12499 else
12500 bnxt_remap_fw_health_regs(bp);
12501
12502 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12503 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12504 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12505 return -ENODEV;
12506 }
12507 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12508 caps_change = true;
12509
12510 if (resc_reinit || fw_reset || caps_change) {
12511 if (fw_reset || caps_change) {
12512 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12513 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12514 bnxt_ulp_irq_stop(bp);
12515 bnxt_free_ctx_mem(bp, false);
12516 bnxt_dcb_free(bp);
12517 rc = bnxt_fw_init_one(bp);
12518 if (rc) {
12519 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12520 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12521 return rc;
12522 }
12523 /* IRQ will be initialized later in bnxt_request_irq() */
12524 bnxt_clear_int_mode(bp);
12525 }
12526 rc = bnxt_cancel_reservations(bp, fw_reset);
12527 }
12528 return rc;
12529 }
12530
12531 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12532 {
12533 struct hwrm_port_led_qcaps_output *resp;
12534 struct hwrm_port_led_qcaps_input *req;
12535 struct bnxt_pf_info *pf = &bp->pf;
12536 int rc;
12537
12538 bp->num_leds = 0;
12539 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12540 return 0;
12541
12542 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12543 if (rc)
12544 return rc;
12545
12546 req->port_id = cpu_to_le16(pf->port_id);
12547 resp = hwrm_req_hold(bp, req);
12548 rc = hwrm_req_send(bp, req);
12549 if (rc) {
12550 hwrm_req_drop(bp, req);
12551 return rc;
12552 }
12553 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12554 int i;
12555
12556 bp->num_leds = resp->num_leds;
12557 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12558 bp->num_leds);
12559 for (i = 0; i < bp->num_leds; i++) {
12560 struct bnxt_led_info *led = &bp->leds[i];
12561 __le16 caps = led->led_state_caps;
12562
12563 if (!led->led_group_id ||
12564 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12565 bp->num_leds = 0;
12566 break;
12567 }
12568 }
12569 }
12570 hwrm_req_drop(bp, req);
12571 return 0;
12572 }
12573
12574 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12575 {
12576 struct hwrm_wol_filter_alloc_output *resp;
12577 struct hwrm_wol_filter_alloc_input *req;
12578 int rc;
12579
12580 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12581 if (rc)
12582 return rc;
12583
12584 req->port_id = cpu_to_le16(bp->pf.port_id);
12585 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12586 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12587 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12588
12589 resp = hwrm_req_hold(bp, req);
12590 rc = hwrm_req_send(bp, req);
12591 if (!rc)
12592 bp->wol_filter_id = resp->wol_filter_id;
12593 hwrm_req_drop(bp, req);
12594 return rc;
12595 }
12596
12597 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12598 {
12599 struct hwrm_wol_filter_free_input *req;
12600 int rc;
12601
12602 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12603 if (rc)
12604 return rc;
12605
12606 req->port_id = cpu_to_le16(bp->pf.port_id);
12607 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12608 req->wol_filter_id = bp->wol_filter_id;
12609
12610 return hwrm_req_send(bp, req);
12611 }
12612
12613 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12614 {
12615 struct hwrm_wol_filter_qcfg_output *resp;
12616 struct hwrm_wol_filter_qcfg_input *req;
12617 u16 next_handle = 0;
12618 int rc;
12619
12620 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12621 if (rc)
12622 return rc;
12623
12624 req->port_id = cpu_to_le16(bp->pf.port_id);
12625 req->handle = cpu_to_le16(handle);
12626 resp = hwrm_req_hold(bp, req);
12627 rc = hwrm_req_send(bp, req);
12628 if (!rc) {
12629 next_handle = le16_to_cpu(resp->next_handle);
12630 if (next_handle != 0) {
12631 if (resp->wol_type ==
12632 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12633 bp->wol = 1;
12634 bp->wol_filter_id = resp->wol_filter_id;
12635 }
12636 }
12637 }
12638 hwrm_req_drop(bp, req);
12639 return next_handle;
12640 }
12641
12642 static void bnxt_get_wol_settings(struct bnxt *bp)
12643 {
12644 u16 handle = 0;
12645
12646 bp->wol = 0;
12647 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12648 return;
12649
12650 do {
12651 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12652 } while (handle && handle != 0xffff);
12653 }
12654
12655 static bool bnxt_eee_config_ok(struct bnxt *bp)
12656 {
12657 struct ethtool_keee *eee = &bp->eee;
12658 struct bnxt_link_info *link_info = &bp->link_info;
12659
12660 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12661 return true;
12662
12663 if (eee->eee_enabled) {
12664 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12665 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12666
12667 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12668
12669 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12670 eee->eee_enabled = 0;
12671 return false;
12672 }
12673 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12674 linkmode_and(eee->advertised, advertising,
12675 eee->supported);
12676 return false;
12677 }
12678 }
12679 return true;
12680 }
12681
12682 static int bnxt_update_phy_setting(struct bnxt *bp)
12683 {
12684 int rc;
12685 bool update_link = false;
12686 bool update_pause = false;
12687 bool update_eee = false;
12688 struct bnxt_link_info *link_info = &bp->link_info;
12689
12690 rc = bnxt_update_link(bp, true);
12691 if (rc) {
12692 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12693 rc);
12694 return rc;
12695 }
12696 if (!BNXT_SINGLE_PF(bp))
12697 return 0;
12698
12699 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12700 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12701 link_info->req_flow_ctrl)
12702 update_pause = true;
12703 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12704 link_info->force_pause_setting != link_info->req_flow_ctrl)
12705 update_pause = true;
12706 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12707 if (BNXT_AUTO_MODE(link_info->auto_mode))
12708 update_link = true;
12709 if (bnxt_force_speed_updated(link_info))
12710 update_link = true;
12711 if (link_info->req_duplex != link_info->duplex_setting)
12712 update_link = true;
12713 } else {
12714 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12715 update_link = true;
12716 if (bnxt_auto_speed_updated(link_info))
12717 update_link = true;
12718 }
12719
12720 /* The last close may have shut down the link, so we need to call
12721 * PHY_CFG to bring it back up.
12722 */
12723 if (!BNXT_LINK_IS_UP(bp))
12724 update_link = true;
12725
12726 if (!bnxt_eee_config_ok(bp))
12727 update_eee = true;
12728
12729 if (update_link)
12730 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12731 else if (update_pause)
12732 rc = bnxt_hwrm_set_pause(bp);
12733 if (rc) {
12734 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12735 rc);
12736 return rc;
12737 }
12738
12739 return rc;
12740 }
12741
12742 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12743
12744 static int bnxt_reinit_after_abort(struct bnxt *bp)
12745 {
12746 int rc;
12747
12748 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12749 return -EBUSY;
12750
12751 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12752 return -ENODEV;
12753
12754 rc = bnxt_fw_init_one(bp);
12755 if (!rc) {
12756 bnxt_clear_int_mode(bp);
12757 rc = bnxt_init_int_mode(bp);
12758 if (!rc) {
12759 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12760 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12761 }
12762 }
12763 return rc;
12764 }
12765
12766 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12767 {
12768 struct bnxt_ntuple_filter *ntp_fltr;
12769 struct bnxt_l2_filter *l2_fltr;
12770
12771 if (list_empty(&fltr->list))
12772 return;
12773
12774 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12775 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12776 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12777 atomic_inc(&l2_fltr->refcnt);
12778 ntp_fltr->l2_fltr = l2_fltr;
12779 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12780 bnxt_del_ntp_filter(bp, ntp_fltr);
12781 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12782 fltr->sw_id);
12783 }
12784 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12785 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12786 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12787 bnxt_del_l2_filter(bp, l2_fltr);
12788 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12789 fltr->sw_id);
12790 }
12791 }
12792 }
12793
12794 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12795 {
12796 struct bnxt_filter_base *usr_fltr, *tmp;
12797
12798 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12799 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12800 }
12801
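/* Spread the online CPUs (preferring the device's NUMA node) across the
 * per-TC TX queues and register the resulting masks with
 * netif_set_xps_queue().
 */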
12802 static int bnxt_set_xps_mapping(struct bnxt *bp)
12803 {
12804 int numa_node = dev_to_node(&bp->pdev->dev);
12805 unsigned int q_idx, map_idx, cpu, i;
12806 const struct cpumask *cpu_mask_ptr;
12807 int nr_cpus = num_online_cpus();
12808 cpumask_t *q_map;
12809 int rc = 0;
12810
12811 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12812 if (!q_map)
12813 return -ENOMEM;
12814
12815 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12816 * Each TC has the same number of TX queues. The nth TX queue for each
12817 * TC will have the same CPU mask.
12818 */
12819 for (i = 0; i < nr_cpus; i++) {
12820 map_idx = i % bp->tx_nr_rings_per_tc;
12821 cpu = cpumask_local_spread(i, numa_node);
12822 cpu_mask_ptr = get_cpu_mask(cpu);
12823 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12824 }
12825
12826 /* Register CPU mask for each TX queue except the ones marked for XDP */
12827 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12828 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12829 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12830 if (rc) {
12831 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12832 q_idx);
12833 break;
12834 }
12835 }
12836
12837 kfree(q_map);
12838
12839 return rc;
12840 }
12841
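/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC, update PHY settings, and start the periodic timer.
 * On failure, unwind the partially allocated resources.
 */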
12842 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12843 {
12844 int rc = 0;
12845
12846 netif_carrier_off(bp->dev);
12847 if (irq_re_init) {
12848 /* Reserve rings now if none were reserved at driver probe. */
12849 rc = bnxt_init_dflt_ring_mode(bp);
12850 if (rc) {
12851 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12852 return rc;
12853 }
12854 }
12855 rc = bnxt_reserve_rings(bp, irq_re_init);
12856 if (rc)
12857 return rc;
12858
12859 rc = bnxt_alloc_mem(bp, irq_re_init);
12860 if (rc) {
12861 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12862 goto open_err_free_mem;
12863 }
12864
12865 if (irq_re_init) {
12866 bnxt_init_napi(bp);
12867 rc = bnxt_request_irq(bp);
12868 if (rc) {
12869 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12870 goto open_err_irq;
12871 }
12872 }
12873
12874 rc = bnxt_init_nic(bp, irq_re_init);
12875 if (rc) {
12876 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12877 goto open_err_irq;
12878 }
12879
12880 bnxt_enable_napi(bp);
12881 bnxt_debug_dev_init(bp);
12882
12883 if (link_re_init) {
12884 mutex_lock(&bp->link_lock);
12885 rc = bnxt_update_phy_setting(bp);
12886 mutex_unlock(&bp->link_lock);
12887 if (rc) {
12888 netdev_warn(bp->dev, "failed to update phy settings\n");
12889 if (BNXT_SINGLE_PF(bp)) {
12890 bp->link_info.phy_retry = true;
12891 bp->link_info.phy_retry_expires =
12892 jiffies + 5 * HZ;
12893 }
12894 }
12895 }
12896
12897 if (irq_re_init) {
12898 udp_tunnel_nic_reset_ntf(bp->dev);
12899 rc = bnxt_set_xps_mapping(bp);
12900 if (rc)
12901 netdev_warn(bp->dev, "failed to set xps mapping\n");
12902 }
12903
12904 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12905 if (!static_key_enabled(&bnxt_xdp_locking_key))
12906 static_branch_enable(&bnxt_xdp_locking_key);
12907 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12908 static_branch_disable(&bnxt_xdp_locking_key);
12909 }
12910 set_bit(BNXT_STATE_OPEN, &bp->state);
12911 bnxt_enable_int(bp);
12912 /* Enable TX queues */
12913 bnxt_tx_enable(bp);
12914 mod_timer(&bp->timer, jiffies + bp->current_interval);
12915 /* Poll link status and check for SFP+ module status */
12916 mutex_lock(&bp->link_lock);
12917 bnxt_get_port_module_status(bp);
12918 mutex_unlock(&bp->link_lock);
12919
12920 /* VF-reps may need to be re-opened after the PF is re-opened */
12921 if (BNXT_PF(bp))
12922 bnxt_vf_reps_open(bp);
12923 bnxt_ptp_init_rtc(bp, true);
12924 bnxt_ptp_cfg_tstamp_filters(bp);
12925 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12926 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12927 bnxt_cfg_usr_fltrs(bp);
12928 return 0;
12929
12930 open_err_irq:
12931 bnxt_del_napi(bp);
12932
12933 open_err_free_mem:
12934 bnxt_free_skbs(bp);
12935 bnxt_free_irq(bp);
12936 bnxt_free_mem(bp, true);
12937 return rc;
12938 }
12939
12940 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12941 {
12942 int rc = 0;
12943
12944 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12945 rc = -EIO;
12946 if (!rc)
12947 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12948 if (rc) {
12949 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12950 netif_close(bp->dev);
12951 }
12952 return rc;
12953 }
12954
12955 /* netdev instance lock held; open the NIC halfway by allocating all
12956 * resources, but leave NAPI, IRQ, and TX disabled. This is mainly used
12957 * for offline self tests.
12958 */
12959 int bnxt_half_open_nic(struct bnxt *bp)
12960 {
12961 int rc = 0;
12962
12963 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12964 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
12965 rc = -ENODEV;
12966 goto half_open_err;
12967 }
12968
12969 rc = bnxt_alloc_mem(bp, true);
12970 if (rc) {
12971 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12972 goto half_open_err;
12973 }
12974 bnxt_init_napi(bp);
12975 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12976 rc = bnxt_init_nic(bp, true);
12977 if (rc) {
12978 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12979 bnxt_del_napi(bp);
12980 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12981 goto half_open_err;
12982 }
12983 return 0;
12984
12985 half_open_err:
12986 bnxt_free_skbs(bp);
12987 bnxt_free_mem(bp, true);
12988 netif_close(bp->dev);
12989 return rc;
12990 }
12991
12992 /* netdev instance lock held, this call can only be made after a previous
12993 * successful call to bnxt_half_open_nic().
12994 */
12995 void bnxt_half_close_nic(struct bnxt *bp)
12996 {
12997 bnxt_hwrm_resource_free(bp, false, true);
12998 bnxt_del_napi(bp);
12999 bnxt_free_skbs(bp);
13000 bnxt_free_mem(bp, true);
13001 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13002 }
13003
13004 void bnxt_reenable_sriov(struct bnxt *bp)
13005 {
13006 if (BNXT_PF(bp)) {
13007 struct bnxt_pf_info *pf = &bp->pf;
13008 int n = pf->active_vfs;
13009
13010 if (n)
13011 bnxt_cfg_hw_sriov(bp, &n, true);
13012 }
13013 }
13014
13015 static int bnxt_open(struct net_device *dev)
13016 {
13017 struct bnxt *bp = netdev_priv(dev);
13018 int rc;
13019
13020 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13021 rc = bnxt_reinit_after_abort(bp);
13022 if (rc) {
13023 if (rc == -EBUSY)
13024 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13025 else
13026 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13027 return -ENODEV;
13028 }
13029 }
13030
13031 rc = bnxt_hwrm_if_change(bp, true);
13032 if (rc)
13033 return rc;
13034
13035 rc = __bnxt_open_nic(bp, true, true);
13036 if (rc) {
13037 bnxt_hwrm_if_change(bp, false);
13038 } else {
13039 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13040 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13041 bnxt_queue_sp_work(bp,
13042 BNXT_RESTART_ULP_SP_EVENT);
13043 }
13044 }
13045
13046 return rc;
13047 }
13048
13049 static bool bnxt_drv_busy(struct bnxt *bp)
13050 {
13051 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13052 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13053 }
13054
13055 static void bnxt_get_ring_stats(struct bnxt *bp,
13056 struct rtnl_link_stats64 *stats);
13057
13058 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13059 bool link_re_init)
13060 {
13061 /* Close the VF-reps before closing PF */
13062 if (BNXT_PF(bp))
13063 bnxt_vf_reps_close(bp);
13064
13065 /* Change device state to avoid TX queue wake-ups */
13066 bnxt_tx_disable(bp);
13067
13068 clear_bit(BNXT_STATE_OPEN, &bp->state);
13069 smp_mb__after_atomic();
13070 while (bnxt_drv_busy(bp))
13071 msleep(20);
13072
13073 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13074 bnxt_clear_rss_ctxs(bp);
13075 /* Flush rings and disable interrupts */
13076 bnxt_shutdown_nic(bp, irq_re_init);
13077
13078 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13079
13080 bnxt_debug_dev_exit(bp);
13081 bnxt_disable_napi(bp);
13082 timer_delete_sync(&bp->timer);
13083 bnxt_free_skbs(bp);
13084
13085 /* Save ring stats before shutdown */
13086 if (bp->bnapi && irq_re_init) {
13087 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13088 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13089 }
13090 if (irq_re_init) {
13091 bnxt_free_irq(bp);
13092 bnxt_del_napi(bp);
13093 }
13094 bnxt_free_mem(bp, irq_re_init);
13095 }
13096
13097 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13098 {
13099 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13100 /* If we get here, it means a firmware reset is in progress
13101 * while we are trying to close. We can safely proceed with
13102 * the close because we are holding the netdev instance lock.
13103 * Some firmware messages may fail as we proceed to close.
13104 * We set the ABORT_ERR flag here so that the FW reset thread
13105 * will later abort when it gets the netdev instance lock
13106 * and sees the flag.
13107 */
13108 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13109 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13110 }
13111
13112 #ifdef CONFIG_BNXT_SRIOV
13113 if (bp->sriov_cfg) {
13114 int rc;
13115
13116 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13117 !bp->sriov_cfg,
13118 BNXT_SRIOV_CFG_WAIT_TMO);
13119 if (!rc)
13120 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13121 else if (rc < 0)
13122 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13123 }
13124 #endif
13125 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13126 }
13127
13128 static int bnxt_close(struct net_device *dev)
13129 {
13130 struct bnxt *bp = netdev_priv(dev);
13131
13132 bnxt_close_nic(bp, true, true);
13133 bnxt_hwrm_shutdown_link(bp);
13134 bnxt_hwrm_if_change(bp, false);
13135 return 0;
13136 }
13137
13138 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13139 u16 *val)
13140 {
13141 struct hwrm_port_phy_mdio_read_output *resp;
13142 struct hwrm_port_phy_mdio_read_input *req;
13143 int rc;
13144
13145 if (bp->hwrm_spec_code < 0x10a00)
13146 return -EOPNOTSUPP;
13147
13148 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13149 if (rc)
13150 return rc;
13151
13152 req->port_id = cpu_to_le16(bp->pf.port_id);
13153 req->phy_addr = phy_addr;
13154 req->reg_addr = cpu_to_le16(reg & 0x1f);
13155 if (mdio_phy_id_is_c45(phy_addr)) {
13156 req->cl45_mdio = 1;
13157 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13158 req->dev_addr = mdio_phy_id_devad(phy_addr);
13159 req->reg_addr = cpu_to_le16(reg);
13160 }
13161
13162 resp = hwrm_req_hold(bp, req);
13163 rc = hwrm_req_send(bp, req);
13164 if (!rc)
13165 *val = le16_to_cpu(resp->reg_data);
13166 hwrm_req_drop(bp, req);
13167 return rc;
13168 }
13169
13170 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13171 u16 val)
13172 {
13173 struct hwrm_port_phy_mdio_write_input *req;
13174 int rc;
13175
13176 if (bp->hwrm_spec_code < 0x10a00)
13177 return -EOPNOTSUPP;
13178
13179 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13180 if (rc)
13181 return rc;
13182
13183 req->port_id = cpu_to_le16(bp->pf.port_id);
13184 req->phy_addr = phy_addr;
13185 req->reg_addr = cpu_to_le16(reg & 0x1f);
13186 if (mdio_phy_id_is_c45(phy_addr)) {
13187 req->cl45_mdio = 1;
13188 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13189 req->dev_addr = mdio_phy_id_devad(phy_addr);
13190 req->reg_addr = cpu_to_le16(reg);
13191 }
13192 req->reg_data = cpu_to_le16(val);
13193
13194 return hwrm_req_send(bp, req);
13195 }
13196
13197 /* netdev instance lock held */
13198 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13199 {
13200 struct mii_ioctl_data *mdio = if_mii(ifr);
13201 struct bnxt *bp = netdev_priv(dev);
13202 int rc;
13203
13204 switch (cmd) {
13205 case SIOCGMIIPHY:
13206 mdio->phy_id = bp->link_info.phy_addr;
13207
13208 fallthrough;
13209 case SIOCGMIIREG: {
13210 u16 mii_regval = 0;
13211
13212 if (!netif_running(dev))
13213 return -EAGAIN;
13214
13215 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13216 &mii_regval);
13217 mdio->val_out = mii_regval;
13218 return rc;
13219 }
13220
13221 case SIOCSMIIREG:
13222 if (!netif_running(dev))
13223 return -EAGAIN;
13224
13225 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13226 mdio->val_in);
13227
13228 case SIOCSHWTSTAMP:
13229 return bnxt_hwtstamp_set(dev, ifr);
13230
13231 case SIOCGHWTSTAMP:
13232 return bnxt_hwtstamp_get(dev, ifr);
13233
13234 default:
13235 /* do nothing */
13236 break;
13237 }
13238 return -EOPNOTSUPP;
13239 }
13240
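/* Sum the per-completion-ring hardware counters into rtnl_link_stats64. */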
13241 static void bnxt_get_ring_stats(struct bnxt *bp,
13242 struct rtnl_link_stats64 *stats)
13243 {
13244 int i;
13245
13246 for (i = 0; i < bp->cp_nr_rings; i++) {
13247 struct bnxt_napi *bnapi = bp->bnapi[i];
13248 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13249 u64 *sw = cpr->stats.sw_stats;
13250
13251 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13252 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13253 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13254
13255 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13256 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13257 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13258
13259 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13260 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13261 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13262
13263 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13264 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13265 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13266
13267 stats->rx_missed_errors +=
13268 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13269
13270 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13271
13272 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13273
13274 stats->rx_dropped +=
13275 cpr->sw_stats->rx.rx_netpoll_discards +
13276 cpr->sw_stats->rx.rx_oom_discards;
13277 }
13278 }
13279
13280 static void bnxt_add_prev_stats(struct bnxt *bp,
13281 struct rtnl_link_stats64 *stats)
13282 {
13283 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13284
13285 stats->rx_packets += prev_stats->rx_packets;
13286 stats->tx_packets += prev_stats->tx_packets;
13287 stats->rx_bytes += prev_stats->rx_bytes;
13288 stats->tx_bytes += prev_stats->tx_bytes;
13289 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13290 stats->multicast += prev_stats->multicast;
13291 stats->rx_dropped += prev_stats->rx_dropped;
13292 stats->tx_dropped += prev_stats->tx_dropped;
13293 }
13294
13295 static void
13296 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13297 {
13298 struct bnxt *bp = netdev_priv(dev);
13299
13300 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13301 /* Make sure bnxt_close_nic() sees that we are reading stats before
13302 * we check the BNXT_STATE_OPEN flag.
13303 */
13304 smp_mb__after_atomic();
13305 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13306 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13307 *stats = bp->net_stats_prev;
13308 return;
13309 }
13310
13311 bnxt_get_ring_stats(bp, stats);
13312 bnxt_add_prev_stats(bp, stats);
13313
13314 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13315 u64 *rx = bp->port_stats.sw_stats;
13316 u64 *tx = bp->port_stats.sw_stats +
13317 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13318
13319 stats->rx_crc_errors =
13320 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13321 stats->rx_frame_errors =
13322 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13323 stats->rx_length_errors =
13324 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13325 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13326 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13327 stats->rx_errors =
13328 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13329 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13330 stats->collisions =
13331 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13332 stats->tx_fifo_errors =
13333 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13334 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13335 }
13336 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13337 }
13338
13339 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13340 struct bnxt_total_ring_err_stats *stats,
13341 struct bnxt_cp_ring_info *cpr)
13342 {
13343 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13344 u64 *hw_stats = cpr->stats.sw_stats;
13345
13346 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13347 stats->rx_total_resets += sw_stats->rx.rx_resets;
13348 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13349 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13350 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13351 stats->rx_total_ring_discards +=
13352 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13353 stats->tx_total_resets += sw_stats->tx.tx_resets;
13354 stats->tx_total_ring_discards +=
13355 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13356 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13357 }
13358
13359 void bnxt_get_ring_err_stats(struct bnxt *bp,
13360 struct bnxt_total_ring_err_stats *stats)
13361 {
13362 int i;
13363
13364 for (i = 0; i < bp->cp_nr_rings; i++)
13365 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13366 }
13367
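/* Copy the netdev multicast list into the default VNIC and report whether
 * it changed. If the list exceeds BNXT_MAX_MC_ADDRS, fall back to the
 * ALL_MCAST RX mask instead.
 */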
13368 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13369 {
13370 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13371 struct net_device *dev = bp->dev;
13372 struct netdev_hw_addr *ha;
13373 u8 *haddr;
13374 int mc_count = 0;
13375 bool update = false;
13376 int off = 0;
13377
13378 netdev_for_each_mc_addr(ha, dev) {
13379 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13380 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13381 vnic->mc_list_count = 0;
13382 return false;
13383 }
13384 haddr = ha->addr;
13385 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13386 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13387 update = true;
13388 }
13389 off += ETH_ALEN;
13390 mc_count++;
13391 }
13392 if (mc_count)
13393 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13394
13395 if (mc_count != vnic->mc_list_count) {
13396 vnic->mc_list_count = mc_count;
13397 update = true;
13398 }
13399 return update;
13400 }
13401
13402 static bool bnxt_uc_list_updated(struct bnxt *bp)
13403 {
13404 struct net_device *dev = bp->dev;
13405 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13406 struct netdev_hw_addr *ha;
13407 int off = 0;
13408
13409 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13410 return true;
13411
13412 netdev_for_each_uc_addr(ha, dev) {
13413 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13414 return true;
13415
13416 off += ETH_ALEN;
13417 }
13418 return false;
13419 }
13420
13421 static void bnxt_set_rx_mode(struct net_device *dev)
13422 {
13423 struct bnxt *bp = netdev_priv(dev);
13424 struct bnxt_vnic_info *vnic;
13425 bool mc_update = false;
13426 bool uc_update;
13427 u32 mask;
13428
13429 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13430 return;
13431
13432 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13433 mask = vnic->rx_mask;
13434 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13435 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13436 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13437 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13438
13439 if (dev->flags & IFF_PROMISC)
13440 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13441
13442 uc_update = bnxt_uc_list_updated(bp);
13443
13444 if (dev->flags & IFF_BROADCAST)
13445 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13446 if (dev->flags & IFF_ALLMULTI) {
13447 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13448 vnic->mc_list_count = 0;
13449 } else if (dev->flags & IFF_MULTICAST) {
13450 mc_update = bnxt_mc_list_updated(bp, &mask);
13451 }
13452
13453 if (mask != vnic->rx_mask || uc_update || mc_update) {
13454 vnic->rx_mask = mask;
13455
13456 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13457 }
13458 }
13459
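/* Program the unicast filter list and the RX mask into the firmware. If
 * the multicast filters cannot be set, retry with ALL_MCAST mode.
 */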
13460 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13461 {
13462 struct net_device *dev = bp->dev;
13463 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13464 struct netdev_hw_addr *ha;
13465 int i, off = 0, rc;
13466 bool uc_update;
13467
13468 netif_addr_lock_bh(dev);
13469 uc_update = bnxt_uc_list_updated(bp);
13470 netif_addr_unlock_bh(dev);
13471
13472 if (!uc_update)
13473 goto skip_uc;
13474
13475 for (i = 1; i < vnic->uc_filter_count; i++) {
13476 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13477
13478 bnxt_hwrm_l2_filter_free(bp, fltr);
13479 bnxt_del_l2_filter(bp, fltr);
13480 }
13481
13482 vnic->uc_filter_count = 1;
13483
13484 netif_addr_lock_bh(dev);
13485 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13486 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13487 } else {
13488 netdev_for_each_uc_addr(ha, dev) {
13489 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13490 off += ETH_ALEN;
13491 vnic->uc_filter_count++;
13492 }
13493 }
13494 netif_addr_unlock_bh(dev);
13495
13496 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13497 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13498 if (rc) {
13499 if (BNXT_VF(bp) && rc == -ENODEV) {
13500 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13501 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13502 else
13503 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13504 rc = 0;
13505 } else {
13506 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13507 }
13508 vnic->uc_filter_count = i;
13509 return rc;
13510 }
13511 }
13512 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13513 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13514
13515 skip_uc:
13516 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13517 !bnxt_promisc_ok(bp))
13518 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13519 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13520 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13521 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13522 rc);
13523 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13524 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13525 vnic->mc_list_count = 0;
13526 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13527 }
13528 if (rc)
13529 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13530 rc);
13531
13532 return rc;
13533 }
13534
13535 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13536 {
13537 #ifdef CONFIG_BNXT_SRIOV
13538 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13539 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13540
13541 /* No minimum rings were provisioned by the PF. Don't
13542 * reserve rings by default when the device is down.
13543 */
13544 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13545 return true;
13546
13547 if (!netif_running(bp->dev))
13548 return false;
13549 }
13550 #endif
13551 return true;
13552 }
13553
13554 /* If the chip and firmware support RFS */
13555 static bool bnxt_rfs_supported(struct bnxt *bp)
13556 {
13557 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13558 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13559 return true;
13560 return false;
13561 }
13562 /* 212 firmware is broken for aRFS */
13563 if (BNXT_FW_MAJ(bp) == 212)
13564 return false;
13565 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13566 return true;
13567 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13568 return true;
13569 return false;
13570 }
13571
13572 /* If runtime conditions support RFS */
13573 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13574 {
13575 struct bnxt_hw_rings hwr = {0};
13576 int max_vnics, max_rss_ctxs;
13577
13578 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13579 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13580 return bnxt_rfs_supported(bp);
13581
13582 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13583 return false;
13584
13585 hwr.grp = bp->rx_nr_rings;
13586 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13587 if (new_rss_ctx)
13588 hwr.vnic++;
13589 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13590 max_vnics = bnxt_get_max_func_vnics(bp);
13591 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13592
13593 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13594 if (bp->rx_nr_rings > 1)
13595 netdev_warn(bp->dev,
13596 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13597 min(max_rss_ctxs - 1, max_vnics - 1));
13598 return false;
13599 }
13600
13601 if (!BNXT_NEW_RM(bp))
13602 return true;
13603
13604 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13605 * issue that will mess up the default VNIC if we reduce the
13606 * reservations.
13607 */
13608 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13609 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13610 return true;
13611
13612 bnxt_hwrm_reserve_rings(bp, &hwr);
13613 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13614 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13615 return true;
13616
13617 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13618 hwr.vnic = 1;
13619 hwr.rss_ctx = 0;
13620 bnxt_hwrm_reserve_rings(bp, &hwr);
13621 return false;
13622 }
13623
13624 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13625 netdev_features_t features)
13626 {
13627 struct bnxt *bp = netdev_priv(dev);
13628 netdev_features_t vlan_features;
13629
13630 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13631 features &= ~NETIF_F_NTUPLE;
13632
13633 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13634 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13635
13636 if (!(features & NETIF_F_GRO))
13637 features &= ~NETIF_F_GRO_HW;
13638
13639 if (features & NETIF_F_GRO_HW)
13640 features &= ~NETIF_F_LRO;
13641
13642 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13643 * turned on or off together.
13644 */
13645 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13646 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13647 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13648 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13649 else if (vlan_features)
13650 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13651 }
13652 #ifdef CONFIG_BNXT_SRIOV
13653 if (BNXT_VF(bp) && bp->vf.vlan)
13654 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13655 #endif
13656 return features;
13657 }
13658
13659 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13660 bool link_re_init, u32 flags, bool update_tpa)
13661 {
13662 bnxt_close_nic(bp, irq_re_init, link_re_init);
13663 bp->flags = flags;
13664 if (update_tpa)
13665 bnxt_set_ring_params(bp);
13666 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13667 }
13668
13669 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13670 {
13671 bool update_tpa = false, update_ntuple = false;
13672 struct bnxt *bp = netdev_priv(dev);
13673 u32 flags = bp->flags;
13674 u32 changes;
13675 int rc = 0;
13676 bool re_init = false;
13677
13678 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13679 if (features & NETIF_F_GRO_HW)
13680 flags |= BNXT_FLAG_GRO;
13681 else if (features & NETIF_F_LRO)
13682 flags |= BNXT_FLAG_LRO;
13683
13684 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13685 flags &= ~BNXT_FLAG_TPA;
13686
13687 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13688 flags |= BNXT_FLAG_STRIP_VLAN;
13689
13690 if (features & NETIF_F_NTUPLE)
13691 flags |= BNXT_FLAG_RFS;
13692 else
13693 bnxt_clear_usr_fltrs(bp, true);
13694
13695 changes = flags ^ bp->flags;
13696 if (changes & BNXT_FLAG_TPA) {
13697 update_tpa = true;
13698 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13699 (flags & BNXT_FLAG_TPA) == 0 ||
13700 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13701 re_init = true;
13702 }
13703
13704 if (changes & ~BNXT_FLAG_TPA)
13705 re_init = true;
13706
13707 if (changes & BNXT_FLAG_RFS)
13708 update_ntuple = true;
13709
13710 if (flags != bp->flags) {
13711 u32 old_flags = bp->flags;
13712
13713 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13714 bp->flags = flags;
13715 if (update_tpa)
13716 bnxt_set_ring_params(bp);
13717 return rc;
13718 }
13719
13720 if (update_ntuple)
13721 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13722
13723 if (re_init)
13724 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13725
13726 if (update_tpa) {
13727 bp->flags = flags;
13728 rc = bnxt_set_tpa(bp,
13729 (flags & BNXT_FLAG_TPA) ?
13730 true : false);
13731 if (rc)
13732 bp->flags = old_flags;
13733 }
13734 }
13735 return rc;
13736 }
13737
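/* Walk the IPv6 extension header chain and return true only if it is
 * simple enough for the hardware to parse (see the limits noted below).
 * Used on the bnxt_features_check() path to decide whether TX offloads
 * can be kept for this packet.
 */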
13738 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13739 u8 **nextp)
13740 {
13741 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13742 struct hop_jumbo_hdr *jhdr;
13743 int hdr_count = 0;
13744 u8 *nexthdr;
13745 int start;
13746
13747 /* Check that there are at most 2 IPv6 extension headers, no
13748 * fragment header, and each is <= 64 bytes.
13749 */
13750 start = nw_off + sizeof(*ip6h);
13751 nexthdr = &ip6h->nexthdr;
13752 while (ipv6_ext_hdr(*nexthdr)) {
13753 struct ipv6_opt_hdr *hp;
13754 int hdrlen;
13755
13756 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13757 *nexthdr == NEXTHDR_FRAGMENT)
13758 return false;
13759 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13760 skb_headlen(skb), NULL);
13761 if (!hp)
13762 return false;
13763 if (*nexthdr == NEXTHDR_AUTH)
13764 hdrlen = ipv6_authlen(hp);
13765 else
13766 hdrlen = ipv6_optlen(hp);
13767
13768 if (hdrlen > 64)
13769 return false;
13770
13771 /* The ext header may be a hop-by-hop header inserted for
13772 * big TCP purposes. This will be removed before sending
13773 * from the NIC, so do not count it.
13774 */
13775 if (*nexthdr == NEXTHDR_HOP) {
13776 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13777 goto increment_hdr;
13778
13779 jhdr = (struct hop_jumbo_hdr *)hp;
13780 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13781 jhdr->nexthdr != IPPROTO_TCP)
13782 goto increment_hdr;
13783
13784 goto next_hdr;
13785 }
13786 increment_hdr:
13787 hdr_count++;
13788 next_hdr:
13789 nexthdr = &hp->nexthdr;
13790 start += hdrlen;
13791 }
13792 if (nextp) {
13793 /* Caller will check inner protocol */
13794 if (skb->encapsulation) {
13795 *nextp = nexthdr;
13796 return true;
13797 }
13798 *nextp = NULL;
13799 }
13800 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13801 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13802 }
13803
13804 /* For UDP, we can only handle one VXLAN port, one VXLAN-GPE port, and one Geneve port. */
13805 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13806 {
13807 struct udphdr *uh = udp_hdr(skb);
13808 __be16 udp_port = uh->dest;
13809
13810 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13811 udp_port != bp->vxlan_gpe_port)
13812 return false;
13813 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13814 struct ethhdr *eh = inner_eth_hdr(skb);
13815
13816 switch (eh->h_proto) {
13817 case htons(ETH_P_IP):
13818 return true;
13819 case htons(ETH_P_IPV6):
13820 return bnxt_exthdr_check(bp, skb,
13821 skb_inner_network_offset(skb),
13822 NULL);
13823 }
13824 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13825 return true;
13826 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13827 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13828 NULL);
13829 }
13830 return false;
13831 }
13832
13833 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13834 {
13835 switch (l4_proto) {
13836 case IPPROTO_UDP:
13837 return bnxt_udp_tunl_check(bp, skb);
13838 case IPPROTO_IPIP:
13839 return true;
13840 case IPPROTO_GRE: {
13841 switch (skb->inner_protocol) {
13842 default:
13843 return false;
13844 case htons(ETH_P_IP):
13845 return true;
13846 case htons(ETH_P_IPV6):
13847 fallthrough;
13848 }
13849 }
13850 case IPPROTO_IPV6:
13851 /* Check ext headers of inner ipv6 */
13852 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13853 NULL);
13854 }
13855 return false;
13856 }
13857
13858 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13859 struct net_device *dev,
13860 netdev_features_t features)
13861 {
13862 struct bnxt *bp = netdev_priv(dev);
13863 u8 *l4_proto;
13864
13865 features = vlan_features_check(skb, features);
13866 switch (vlan_get_protocol(skb)) {
13867 case htons(ETH_P_IP):
13868 if (!skb->encapsulation)
13869 return features;
13870 l4_proto = &ip_hdr(skb)->protocol;
13871 if (bnxt_tunl_check(bp, skb, *l4_proto))
13872 return features;
13873 break;
13874 case htons(ETH_P_IPV6):
13875 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13876 &l4_proto))
13877 break;
13878 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13879 return features;
13880 break;
13881 }
13882 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13883 }
13884
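/* Read num_words 32-bit words of chip register space starting at reg_off
 * using the HWRM_DBG_READ_DIRECT command; the firmware DMAs the data into
 * a host buffer which is then copied to reg_buf.
 */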
13885 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13886 u32 *reg_buf)
13887 {
13888 struct hwrm_dbg_read_direct_output *resp;
13889 struct hwrm_dbg_read_direct_input *req;
13890 __le32 *dbg_reg_buf;
13891 dma_addr_t mapping;
13892 int rc, i;
13893
13894 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13895 if (rc)
13896 return rc;
13897
13898 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13899 &mapping);
13900 if (!dbg_reg_buf) {
13901 rc = -ENOMEM;
13902 goto dbg_rd_reg_exit;
13903 }
13904
13905 req->host_dest_addr = cpu_to_le64(mapping);
13906
13907 resp = hwrm_req_hold(bp, req);
13908 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13909 req->read_len32 = cpu_to_le32(num_words);
13910
13911 rc = hwrm_req_send(bp, req);
13912 if (rc || resp->error_code) {
13913 rc = -EIO;
13914 goto dbg_rd_reg_exit;
13915 }
13916 for (i = 0; i < num_words; i++)
13917 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13918
13919 dbg_rd_reg_exit:
13920 hwrm_req_drop(bp, req);
13921 return rc;
13922 }
13923
13924 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13925 u32 ring_id, u32 *prod, u32 *cons)
13926 {
13927 struct hwrm_dbg_ring_info_get_output *resp;
13928 struct hwrm_dbg_ring_info_get_input *req;
13929 int rc;
13930
13931 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13932 if (rc)
13933 return rc;
13934
13935 req->ring_type = ring_type;
13936 req->fw_ring_id = cpu_to_le32(ring_id);
13937 resp = hwrm_req_hold(bp, req);
13938 rc = hwrm_req_send(bp, req);
13939 if (!rc) {
13940 *prod = le32_to_cpu(resp->producer_index);
13941 *cons = le32_to_cpu(resp->consumer_index);
13942 }
13943 hwrm_req_drop(bp, req);
13944 return rc;
13945 }
13946
13947 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13948 {
13949 struct bnxt_tx_ring_info *txr;
13950 int i = bnapi->index, j;
13951
13952 bnxt_for_each_napi_tx(j, bnapi, txr)
13953 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13954 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13955 txr->tx_cons);
13956 }
13957
13958 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13959 {
13960 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13961 int i = bnapi->index;
13962
13963 if (!rxr)
13964 return;
13965
13966 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13967 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13968 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13969 rxr->rx_sw_agg_prod);
13970 }
13971
13972 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13973 {
13974 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13975 int i = bnapi->index;
13976
13977 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13978 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13979 }
13980
13981 static void bnxt_dbg_dump_states(struct bnxt *bp)
13982 {
13983 int i;
13984 struct bnxt_napi *bnapi;
13985
13986 for (i = 0; i < bp->cp_nr_rings; i++) {
13987 bnapi = bp->bnapi[i];
13988 if (netif_msg_drv(bp)) {
13989 bnxt_dump_tx_sw_state(bnapi);
13990 bnxt_dump_rx_sw_state(bnapi);
13991 bnxt_dump_cp_sw_state(bnapi);
13992 }
13993 }
13994 }
13995
13996 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13997 {
13998 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13999 struct hwrm_ring_reset_input *req;
14000 struct bnxt_napi *bnapi = rxr->bnapi;
14001 struct bnxt_cp_ring_info *cpr;
14002 u16 cp_ring_id;
14003 int rc;
14004
14005 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14006 if (rc)
14007 return rc;
14008
14009 cpr = &bnapi->cp_ring;
14010 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14011 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14012 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14013 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14014 return hwrm_req_send_silent(bp, req);
14015 }
14016
14017 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14018 {
14019 if (!silent)
14020 bnxt_dbg_dump_states(bp);
14021 if (netif_running(bp->dev)) {
14022 bnxt_close_nic(bp, !silent, false);
14023 bnxt_open_nic(bp, !silent, false);
14024 }
14025 }
14026
14027 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14028 {
14029 struct bnxt *bp = netdev_priv(dev);
14030
14031 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14032 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14033 }
14034
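/* Called from bnxt_timer(): watch the firmware heartbeat and reset-counter
 * registers and schedule the firmware exception handler if the heartbeat
 * stalls or the reset count changes unexpectedly.
 */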
14035 static void bnxt_fw_health_check(struct bnxt *bp)
14036 {
14037 struct bnxt_fw_health *fw_health = bp->fw_health;
14038 struct pci_dev *pdev = bp->pdev;
14039 u32 val;
14040
14041 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14042 return;
14043
14044 /* Make sure it is enabled before checking the tmr_counter. */
14045 smp_rmb();
14046 if (fw_health->tmr_counter) {
14047 fw_health->tmr_counter--;
14048 return;
14049 }
14050
14051 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14052 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14053 fw_health->arrests++;
14054 goto fw_reset;
14055 }
14056
14057 fw_health->last_fw_heartbeat = val;
14058
14059 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14060 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14061 fw_health->discoveries++;
14062 goto fw_reset;
14063 }
14064
14065 fw_health->tmr_counter = fw_health->tmr_multiplier;
14066 return;
14067
14068 fw_reset:
14069 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14070 }
14071
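/* Periodic per-device timer (re-armed every bp->current_interval jiffies
 * while the device is open).  It only queues sp_event work bits; the actual
 * processing happens later in bnxt_sp_task() in process context.
 */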
14072 static void bnxt_timer(struct timer_list *t)
14073 {
14074 struct bnxt *bp = timer_container_of(bp, t, timer);
14075 struct net_device *dev = bp->dev;
14076
14077 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14078 return;
14079
14080 if (atomic_read(&bp->intr_sem) != 0)
14081 goto bnxt_restart_timer;
14082
14083 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14084 bnxt_fw_health_check(bp);
14085
14086 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14087 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14088
14089 if (bnxt_tc_flower_enabled(bp))
14090 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14091
14092 #ifdef CONFIG_RFS_ACCEL
14093 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14094 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14095 #endif /* CONFIG_RFS_ACCEL */
14096
14097 if (bp->link_info.phy_retry) {
14098 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14099 bp->link_info.phy_retry = false;
14100 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14101 } else {
14102 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14103 }
14104 }
14105
14106 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14107 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14108
14109 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14110 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14111
14112 bnxt_restart_timer:
14113 mod_timer(&bp->timer, jiffies + bp->current_interval);
14114 }
14115
14116 static void bnxt_lock_sp(struct bnxt *bp)
14117 {
14118 /* We are called from bnxt_sp_task() which has BNXT_STATE_IN_SP_TASK
14119 * set. If the device is being closed, bnxt_close() may be holding the
14120 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14121 * So we must clear BNXT_STATE_IN_SP_TASK before taking the netdev
14122 * instance lock.
14123 */
14124 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14125 netdev_lock(bp->dev);
14126 }
14127
14128 static void bnxt_unlock_sp(struct bnxt *bp)
14129 {
14130 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14131 netdev_unlock(bp->dev);
14132 }
14133
14134 /* Only called from bnxt_sp_task() */
14135 static void bnxt_reset(struct bnxt *bp, bool silent)
14136 {
14137 bnxt_lock_sp(bp);
14138 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14139 bnxt_reset_task(bp, silent);
14140 bnxt_unlock_sp(bp);
14141 }
14142
14143 /* Only called from bnxt_sp_task() */
14144 static void bnxt_rx_ring_reset(struct bnxt *bp)
14145 {
14146 int i;
14147
14148 bnxt_lock_sp(bp);
14149 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14150 bnxt_unlock_sp(bp);
14151 return;
14152 }
14153 /* Disable and flush TPA before resetting the RX ring */
14154 if (bp->flags & BNXT_FLAG_TPA)
14155 bnxt_set_tpa(bp, false);
14156 for (i = 0; i < bp->rx_nr_rings; i++) {
14157 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14158 struct bnxt_cp_ring_info *cpr;
14159 int rc;
14160
14161 if (!rxr->bnapi->in_reset)
14162 continue;
14163
14164 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14165 if (rc) {
14166 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14167 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14168 else
14169 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14170 rc);
14171 bnxt_reset_task(bp, true);
14172 break;
14173 }
14174 bnxt_free_one_rx_ring_skbs(bp, rxr);
14175 rxr->rx_prod = 0;
14176 rxr->rx_agg_prod = 0;
14177 rxr->rx_sw_agg_prod = 0;
14178 rxr->rx_next_cons = 0;
14179 rxr->bnapi->in_reset = false;
14180 bnxt_alloc_one_rx_ring(bp, i);
14181 cpr = &rxr->bnapi->cp_ring;
14182 cpr->sw_stats->rx.rx_resets++;
14183 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14184 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14185 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14186 }
14187 if (bp->flags & BNXT_FLAG_TPA)
14188 bnxt_set_tpa(bp, true);
14189 bnxt_unlock_sp(bp);
14190 }
14191
14192 static void bnxt_fw_fatal_close(struct bnxt *bp)
14193 {
14194 bnxt_tx_disable(bp);
14195 bnxt_disable_napi(bp);
14196 bnxt_disable_int_sync(bp);
14197 bnxt_free_irq(bp);
14198 bnxt_clear_int_mode(bp);
14199 pci_disable_device(bp->pdev);
14200 }
14201
14202 static void bnxt_fw_reset_close(struct bnxt *bp)
14203 {
14204 /* When firmware is in fatal state, quiesce device and disable
14205 * bus master to prevent any potential bad DMAs before freeing
14206 * kernel memory.
14207 */
14208 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14209 u16 val = 0;
14210
14211 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14212 if (val == 0xffff)
14213 bp->fw_reset_min_dsecs = 0;
14214 bnxt_fw_fatal_close(bp);
14215 }
14216 __bnxt_close_nic(bp, true, false);
14217 bnxt_vf_reps_free(bp);
14218 bnxt_clear_int_mode(bp);
14219 bnxt_hwrm_func_drv_unrgtr(bp);
14220 if (pci_is_enabled(bp->pdev))
14221 pci_disable_device(bp->pdev);
14222 bnxt_free_ctx_mem(bp, false);
14223 }
14224
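/* Returns true only when the firmware heartbeat is still advancing and the
 * reset counter has changed, i.e. the firmware reset itself and came back
 * healthy without the driver's intervention.
 */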
14225 static bool is_bnxt_fw_ok(struct bnxt *bp)
14226 {
14227 struct bnxt_fw_health *fw_health = bp->fw_health;
14228 bool no_heartbeat = false, has_reset = false;
14229 u32 val;
14230
14231 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14232 if (val == fw_health->last_fw_heartbeat)
14233 no_heartbeat = true;
14234
14235 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14236 if (val != fw_health->last_fw_reset_cnt)
14237 has_reset = true;
14238
14239 if (!no_heartbeat && has_reset)
14240 return true;
14241
14242 return false;
14243 }
14244
14245 /* netdev instance lock is acquired before calling this function */
14246 static void bnxt_force_fw_reset(struct bnxt *bp)
14247 {
14248 struct bnxt_fw_health *fw_health = bp->fw_health;
14249 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14250 u32 wait_dsecs;
14251
14252 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14253 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14254 return;
14255
14256 /* We have to serialize with bnxt_refclk_read(). */
14257 if (ptp) {
14258 unsigned long flags;
14259
14260 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14261 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14262 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14263 } else {
14264 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14265 }
14266 bnxt_fw_reset_close(bp);
14267 wait_dsecs = fw_health->master_func_wait_dsecs;
14268 if (fw_health->primary) {
14269 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14270 wait_dsecs = 0;
14271 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14272 } else {
14273 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14274 wait_dsecs = fw_health->normal_func_wait_dsecs;
14275 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14276 }
14277
14278 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14279 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14280 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14281 }
14282
14283 void bnxt_fw_exception(struct bnxt *bp)
14284 {
14285 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14286 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14287 bnxt_ulp_stop(bp);
14288 bnxt_lock_sp(bp);
14289 bnxt_force_fw_reset(bp);
14290 bnxt_unlock_sp(bp);
14291 }
14292
14293 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14294 * < 0 on error.
14295 */
14296 static int bnxt_get_registered_vfs(struct bnxt *bp)
14297 {
14298 #ifdef CONFIG_BNXT_SRIOV
14299 int rc;
14300
14301 if (!BNXT_PF(bp))
14302 return 0;
14303
14304 rc = bnxt_hwrm_func_qcfg(bp);
14305 if (rc) {
14306 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14307 return rc;
14308 }
14309 if (bp->pf.registered_vfs)
14310 return bp->pf.registered_vfs;
14311 if (bp->sriov_cfg)
14312 return 1;
14313 #endif
14314 return 0;
14315 }
14316
14317 void bnxt_fw_reset(struct bnxt *bp)
14318 {
14319 bnxt_ulp_stop(bp);
14320 bnxt_lock_sp(bp);
14321 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14322 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14323 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14324 int n = 0, tmo;
14325
14326 /* We have to serialize with bnxt_refclk_read(). */
14327 if (ptp) {
14328 unsigned long flags;
14329
14330 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14331 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14332 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14333 } else {
14334 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14335 }
14336 if (bp->pf.active_vfs &&
14337 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14338 n = bnxt_get_registered_vfs(bp);
14339 if (n < 0) {
14340 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14341 n);
14342 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14343 netif_close(bp->dev);
14344 goto fw_reset_exit;
14345 } else if (n > 0) {
14346 u16 vf_tmo_dsecs = n * 10;
14347
14348 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14349 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14350 bp->fw_reset_state =
14351 BNXT_FW_RESET_STATE_POLL_VF;
14352 bnxt_queue_fw_reset_work(bp, HZ / 10);
14353 goto fw_reset_exit;
14354 }
14355 bnxt_fw_reset_close(bp);
14356 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14357 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14358 tmo = HZ / 10;
14359 } else {
14360 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14361 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14362 }
14363 bnxt_queue_fw_reset_work(bp, tmo);
14364 }
14365 fw_reset_exit:
14366 bnxt_unlock_sp(bp);
14367 }
14368
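/* P5+ chips only: look for completion rings that have work pending but
 * whose raw consumer index has not moved since the last check, which
 * indicates a missed interrupt.  The ring state is read back from firmware
 * via DBG_RING_INFO_GET and the event is counted in
 * cpr->sw_stats->cmn.missed_irqs.
 */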
14369 static void bnxt_chk_missed_irq(struct bnxt *bp)
14370 {
14371 int i;
14372
14373 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14374 return;
14375
14376 for (i = 0; i < bp->cp_nr_rings; i++) {
14377 struct bnxt_napi *bnapi = bp->bnapi[i];
14378 struct bnxt_cp_ring_info *cpr;
14379 u32 fw_ring_id;
14380 int j;
14381
14382 if (!bnapi)
14383 continue;
14384
14385 cpr = &bnapi->cp_ring;
14386 for (j = 0; j < cpr->cp_ring_count; j++) {
14387 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14388 u32 val[2];
14389
14390 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14391 continue;
14392
14393 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14394 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14395 continue;
14396 }
14397 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14398 bnxt_dbg_hwrm_ring_info_get(bp,
14399 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14400 fw_ring_id, &val[0], &val[1]);
14401 cpr->sw_stats->cmn.missed_irqs++;
14402 }
14403 }
14404 }
14405
14406 static void bnxt_cfg_ntp_filters(struct bnxt *);
14407
14408 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14409 {
14410 struct bnxt_link_info *link_info = &bp->link_info;
14411
14412 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14413 link_info->autoneg = BNXT_AUTONEG_SPEED;
14414 if (bp->hwrm_spec_code >= 0x10201) {
14415 if (link_info->auto_pause_setting &
14416 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14417 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14418 } else {
14419 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14420 }
14421 bnxt_set_auto_speed(link_info);
14422 } else {
14423 bnxt_set_force_speed(link_info);
14424 link_info->req_duplex = link_info->duplex_setting;
14425 }
14426 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14427 link_info->req_flow_ctrl =
14428 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14429 else
14430 link_info->req_flow_ctrl = link_info->force_pause_setting;
14431 }
14432
14433 static void bnxt_fw_echo_reply(struct bnxt *bp)
14434 {
14435 struct bnxt_fw_health *fw_health = bp->fw_health;
14436 struct hwrm_func_echo_response_input *req;
14437 int rc;
14438
14439 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14440 if (rc)
14441 return;
14442 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14443 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14444 hwrm_req_send(bp, req);
14445 }
14446
14447 static void bnxt_ulp_restart(struct bnxt *bp)
14448 {
14449 bnxt_ulp_stop(bp);
14450 bnxt_ulp_start(bp, 0);
14451 }
14452
14453 static void bnxt_sp_task(struct work_struct *work)
14454 {
14455 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14456
14457 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14458 smp_mb__after_atomic();
14459 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14460 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14461 return;
14462 }
14463
14464 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14465 bnxt_ulp_restart(bp);
14466 bnxt_reenable_sriov(bp);
14467 }
14468
14469 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14470 bnxt_cfg_rx_mode(bp);
14471
14472 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14473 bnxt_cfg_ntp_filters(bp);
14474 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14475 bnxt_hwrm_exec_fwd_req(bp);
14476 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14477 netdev_info(bp->dev, "Received PF driver unload event!\n");
14478 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14479 bnxt_hwrm_port_qstats(bp, 0);
14480 bnxt_hwrm_port_qstats_ext(bp, 0);
14481 bnxt_accumulate_all_stats(bp);
14482 }
14483
14484 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14485 int rc;
14486
14487 mutex_lock(&bp->link_lock);
14488 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14489 &bp->sp_event))
14490 bnxt_hwrm_phy_qcaps(bp);
14491
14492 rc = bnxt_update_link(bp, true);
14493 if (rc)
14494 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14495 rc);
14496
14497 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14498 &bp->sp_event))
14499 bnxt_init_ethtool_link_settings(bp);
14500 mutex_unlock(&bp->link_lock);
14501 }
14502 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14503 int rc;
14504
14505 mutex_lock(&bp->link_lock);
14506 rc = bnxt_update_phy_setting(bp);
14507 mutex_unlock(&bp->link_lock);
14508 if (rc) {
14509 netdev_warn(bp->dev, "update phy settings retry failed\n");
14510 } else {
14511 bp->link_info.phy_retry = false;
14512 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14513 }
14514 }
14515 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14516 mutex_lock(&bp->link_lock);
14517 bnxt_get_port_module_status(bp);
14518 mutex_unlock(&bp->link_lock);
14519 }
14520
14521 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14522 bnxt_tc_flow_stats_work(bp);
14523
14524 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14525 bnxt_chk_missed_irq(bp);
14526
14527 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14528 bnxt_fw_echo_reply(bp);
14529
14530 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14531 bnxt_hwmon_notify_event(bp);
14532
14533 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
14534 * must be the last functions called before exiting.
14535 */
14536 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14537 bnxt_reset(bp, false);
14538
14539 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14540 bnxt_reset(bp, true);
14541
14542 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14543 bnxt_rx_ring_reset(bp);
14544
14545 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14546 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14547 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14548 bnxt_devlink_health_fw_report(bp);
14549 else
14550 bnxt_fw_reset(bp);
14551 }
14552
14553 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14554 if (!is_bnxt_fw_ok(bp))
14555 bnxt_devlink_health_fw_report(bp);
14556 }
14557
14558 smp_mb__before_atomic();
14559 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14560 }
14561
14562 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14563 int *max_cp);
14564
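/* Check whether the requested TX/RX/TC/XDP ring configuration fits within
 * the resources available to this function, including the completion
 * rings, VNICs, RSS contexts and stat contexts it would need.  If dynamic
 * MSI-X allocation is supported, it also tries to grow the MSI-X vector
 * count to cover the new completion rings.
 */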
14565 /* Under netdev instance lock */
14566 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14567 int tx_xdp)
14568 {
14569 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14570 struct bnxt_hw_rings hwr = {0};
14571 int rx_rings = rx;
14572 int rc;
14573
14574 if (tcs)
14575 tx_sets = tcs;
14576
14577 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14578
14579 if (max_rx < rx_rings)
14580 return -ENOMEM;
14581
14582 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14583 rx_rings <<= 1;
14584
14585 hwr.rx = rx_rings;
14586 hwr.tx = tx * tx_sets + tx_xdp;
14587 if (max_tx < hwr.tx)
14588 return -ENOMEM;
14589
14590 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14591
14592 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14593 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14594 if (max_cp < hwr.cp)
14595 return -ENOMEM;
14596 hwr.stat = hwr.cp;
14597 if (BNXT_NEW_RM(bp)) {
14598 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14599 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14600 hwr.grp = rx;
14601 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14602 }
14603 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14604 hwr.cp_p5 = hwr.tx + rx;
14605 rc = bnxt_hwrm_check_rings(bp, &hwr);
14606 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14607 if (!bnxt_ulp_registered(bp->edev)) {
14608 hwr.cp += bnxt_get_ulp_msix_num(bp);
14609 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14610 }
14611 if (hwr.cp > bp->total_irqs) {
14612 int total_msix = bnxt_change_msix(bp, hwr.cp);
14613
14614 if (total_msix < hwr.cp) {
14615 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14616 hwr.cp, total_msix);
14617 rc = -ENOSPC;
14618 }
14619 }
14620 }
14621 return rc;
14622 }
14623
14624 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14625 {
14626 if (bp->bar2) {
14627 pci_iounmap(pdev, bp->bar2);
14628 bp->bar2 = NULL;
14629 }
14630
14631 if (bp->bar1) {
14632 pci_iounmap(pdev, bp->bar1);
14633 bp->bar1 = NULL;
14634 }
14635
14636 if (bp->bar0) {
14637 pci_iounmap(pdev, bp->bar0);
14638 bp->bar0 = NULL;
14639 }
14640 }
14641
14642 static void bnxt_cleanup_pci(struct bnxt *bp)
14643 {
14644 bnxt_unmap_bars(bp, bp->pdev);
14645 pci_release_regions(bp->pdev);
14646 if (pci_is_enabled(bp->pdev))
14647 pci_disable_device(bp->pdev);
14648 }
14649
14650 static void bnxt_init_dflt_coal(struct bnxt *bp)
14651 {
14652 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14653 struct bnxt_coal *coal;
14654 u16 flags = 0;
14655
14656 if (coal_cap->cmpl_params &
14657 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14658 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14659
14660 /* Tick values in microseconds.
14661 * 1 coal_buf x bufs_per_record = 1 completion record.
14662 */
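/* Worked example with the RX defaults set below: coal_ticks = 10 usec and
 * coal_bufs = 30 buffers; with bufs_per_record = 2 this corresponds to
 * 30 / 2 = 15 completion records per coalesced interrupt.
 */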
14663 coal = &bp->rx_coal;
14664 coal->coal_ticks = 10;
14665 coal->coal_bufs = 30;
14666 coal->coal_ticks_irq = 1;
14667 coal->coal_bufs_irq = 2;
14668 coal->idle_thresh = 50;
14669 coal->bufs_per_record = 2;
14670 coal->budget = 64; /* NAPI budget */
14671 coal->flags = flags;
14672
14673 coal = &bp->tx_coal;
14674 coal->coal_ticks = 28;
14675 coal->coal_bufs = 30;
14676 coal->coal_ticks_irq = 2;
14677 coal->coal_bufs_irq = 2;
14678 coal->bufs_per_record = 1;
14679 coal->flags = flags;
14680
14681 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14682 }
14683
14684 /* FW that pre-reserves 1 VNIC per function */
14685 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14686 {
14687 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14688
14689 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14690 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14691 return true;
14692 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14693 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14694 return true;
14695 return false;
14696 }
14697
14698 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14699 {
14700 int rc;
14701
14702 bp->fw_cap = 0;
14703 rc = bnxt_hwrm_ver_get(bp);
14704 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14705 * so wait before continuing with recovery.
14706 */
14707 if (rc)
14708 msleep(100);
14709 bnxt_try_map_fw_health_reg(bp);
14710 if (rc) {
14711 rc = bnxt_try_recover_fw(bp);
14712 if (rc)
14713 return rc;
14714 rc = bnxt_hwrm_ver_get(bp);
14715 if (rc)
14716 return rc;
14717 }
14718
14719 bnxt_nvm_cfg_ver_get(bp);
14720
14721 rc = bnxt_hwrm_func_reset(bp);
14722 if (rc)
14723 return -ENODEV;
14724
14725 bnxt_hwrm_fw_set_time(bp);
14726 return 0;
14727 }
14728
14729 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14730 {
14731 int rc;
14732
14733 /* Get the MAX capabilities for this function */
14734 rc = bnxt_hwrm_func_qcaps(bp);
14735 if (rc) {
14736 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14737 rc);
14738 return -ENODEV;
14739 }
14740
14741 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14742 if (rc)
14743 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14744 rc);
14745
14746 if (bnxt_alloc_fw_health(bp)) {
14747 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14748 } else {
14749 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14750 if (rc)
14751 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14752 rc);
14753 }
14754
14755 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14756 if (rc)
14757 return -ENODEV;
14758
14759 rc = bnxt_alloc_crash_dump_mem(bp);
14760 if (rc)
14761 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14762 rc);
14763 if (!rc) {
14764 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14765 if (rc) {
14766 bnxt_free_crash_dump_mem(bp);
14767 netdev_warn(bp->dev,
14768 "hwrm crash dump mem failure rc: %d\n", rc);
14769 }
14770 }
14771
14772 if (bnxt_fw_pre_resv_vnics(bp))
14773 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14774
14775 bnxt_hwrm_func_qcfg(bp);
14776 bnxt_hwrm_vnic_qcaps(bp);
14777 bnxt_hwrm_port_led_qcaps(bp);
14778 bnxt_ethtool_init(bp);
14779 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14780 __bnxt_hwrm_ptp_qcfg(bp);
14781 bnxt_dcb_init(bp);
14782 bnxt_hwmon_init(bp);
14783 return 0;
14784 }
14785
14786 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14787 {
14788 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14789 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14790 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14791 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14792 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14793 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14794 bp->rss_hash_delta = bp->rss_hash_cfg;
14795 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14796 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14797 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14798 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14799 }
14800 }
14801
14802 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14803 {
14804 struct net_device *dev = bp->dev;
14805
14806 dev->hw_features &= ~NETIF_F_NTUPLE;
14807 dev->features &= ~NETIF_F_NTUPLE;
14808 bp->flags &= ~BNXT_FLAG_RFS;
14809 if (bnxt_rfs_supported(bp)) {
14810 dev->hw_features |= NETIF_F_NTUPLE;
14811 if (bnxt_rfs_capable(bp, false)) {
14812 bp->flags |= BNXT_FLAG_RFS;
14813 dev->features |= NETIF_F_NTUPLE;
14814 }
14815 }
14816 }
14817
14818 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14819 {
14820 struct pci_dev *pdev = bp->pdev;
14821
14822 bnxt_set_dflt_rss_hash_type(bp);
14823 bnxt_set_dflt_rfs(bp);
14824
14825 bnxt_get_wol_settings(bp);
14826 if (bp->flags & BNXT_FLAG_WOL_CAP)
14827 device_set_wakeup_enable(&pdev->dev, bp->wol);
14828 else
14829 device_set_wakeup_capable(&pdev->dev, false);
14830
14831 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14832 bnxt_hwrm_coal_params_qcaps(bp);
14833 }
14834
14835 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14836
14837 int bnxt_fw_init_one(struct bnxt *bp)
14838 {
14839 int rc;
14840
14841 rc = bnxt_fw_init_one_p1(bp);
14842 if (rc) {
14843 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14844 return rc;
14845 }
14846 rc = bnxt_fw_init_one_p2(bp);
14847 if (rc) {
14848 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14849 return rc;
14850 }
14851 rc = bnxt_probe_phy(bp, false);
14852 if (rc)
14853 return rc;
14854 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14855 if (rc)
14856 return rc;
14857
14858 bnxt_fw_init_one_p3(bp);
14859 return 0;
14860 }
14861
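/* Execute one step of the firmware-provided reset sequence.  The register
 * encoding selects the write method: PCI config space, a BAR0/BAR1 offset,
 * or a GRC address that is first mapped through the BAR0 register window.
 * If the step specifies a delay, a config read is issued (presumably to
 * flush the posted write) before sleeping.
 */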
14862 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14863 {
14864 struct bnxt_fw_health *fw_health = bp->fw_health;
14865 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14866 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14867 u32 reg_type, reg_off, delay_msecs;
14868
14869 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14870 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14871 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14872 switch (reg_type) {
14873 case BNXT_FW_HEALTH_REG_TYPE_CFG:
14874 pci_write_config_dword(bp->pdev, reg_off, val);
14875 break;
14876 case BNXT_FW_HEALTH_REG_TYPE_GRC:
14877 writel(reg_off & BNXT_GRC_BASE_MASK,
14878 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14879 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14880 fallthrough;
14881 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14882 writel(val, bp->bar0 + reg_off);
14883 break;
14884 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14885 writel(val, bp->bar1 + reg_off);
14886 break;
14887 }
14888 if (delay_msecs) {
14889 pci_read_config_dword(bp->pdev, 0, &val);
14890 msleep(delay_msecs);
14891 }
14892 }
14893
14894 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14895 {
14896 struct hwrm_func_qcfg_output *resp;
14897 struct hwrm_func_qcfg_input *req;
14898 bool result = true; /* firmware will enforce if unknown */
14899
14900 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14901 return result;
14902
14903 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14904 return result;
14905
14906 req->fid = cpu_to_le16(0xffff);
14907 resp = hwrm_req_hold(bp, req);
14908 if (!hwrm_req_send(bp, req))
14909 result = !!(le16_to_cpu(resp->flags) &
14910 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
14911 hwrm_req_drop(bp, req);
14912 return result;
14913 }
14914
14915 static void bnxt_reset_all(struct bnxt *bp)
14916 {
14917 struct bnxt_fw_health *fw_health = bp->fw_health;
14918 int i, rc;
14919
14920 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14921 bnxt_fw_reset_via_optee(bp);
14922 bp->fw_reset_timestamp = jiffies;
14923 return;
14924 }
14925
14926 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
14927 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
14928 bnxt_fw_reset_writel(bp, i);
14929 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
14930 struct hwrm_fw_reset_input *req;
14931
14932 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
14933 if (!rc) {
14934 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
14935 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
14936 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
14937 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
14938 rc = hwrm_req_send(bp, req);
14939 }
14940 if (rc != -ENODEV)
14941 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
14942 }
14943 bp->fw_reset_timestamp = jiffies;
14944 }
14945
14946 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
14947 {
14948 return time_after(jiffies, bp->fw_reset_timestamp +
14949 (bp->fw_reset_max_dsecs * HZ / 10));
14950 }
14951
14952 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
14953 {
14954 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14955 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
14956 bnxt_dl_health_fw_status_update(bp, false);
14957 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
14958 netif_close(bp->dev);
14959 }
14960
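/* Firmware reset state machine, run from the fw_reset_task delayed work.
 * It advances through a subset of BNXT_FW_RESET_STATE_POLL_VF, POLL_FW_DOWN,
 * RESET_FW, ENABLE_DEV, POLL_FW and OPENING depending on the function's role
 * and firmware capabilities, re-queueing itself with a suitable delay
 * whenever it must wait.  Failures funnel into bnxt_fw_reset_abort() and
 * then restart the ULP with the error code.
 */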
14961 static void bnxt_fw_reset_task(struct work_struct *work)
14962 {
14963 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
14964 int rc = 0;
14965
14966 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14967 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
14968 return;
14969 }
14970
14971 switch (bp->fw_reset_state) {
14972 case BNXT_FW_RESET_STATE_POLL_VF: {
14973 int n = bnxt_get_registered_vfs(bp);
14974 int tmo;
14975
14976 if (n < 0) {
14977 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
14978 n, jiffies_to_msecs(jiffies -
14979 bp->fw_reset_timestamp));
14980 goto fw_reset_abort;
14981 } else if (n > 0) {
14982 if (bnxt_fw_reset_timeout(bp)) {
14983 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14984 bp->fw_reset_state = 0;
14985 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
14986 n);
14987 goto ulp_start;
14988 }
14989 bnxt_queue_fw_reset_work(bp, HZ / 10);
14990 return;
14991 }
14992 bp->fw_reset_timestamp = jiffies;
14993 netdev_lock(bp->dev);
14994 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
14995 bnxt_fw_reset_abort(bp, rc);
14996 netdev_unlock(bp->dev);
14997 goto ulp_start;
14998 }
14999 bnxt_fw_reset_close(bp);
15000 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15001 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15002 tmo = HZ / 10;
15003 } else {
15004 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15005 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15006 }
15007 netdev_unlock(bp->dev);
15008 bnxt_queue_fw_reset_work(bp, tmo);
15009 return;
15010 }
15011 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15012 u32 val;
15013
15014 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15015 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15016 !bnxt_fw_reset_timeout(bp)) {
15017 bnxt_queue_fw_reset_work(bp, HZ / 5);
15018 return;
15019 }
15020
15021 if (!bp->fw_health->primary) {
15022 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15023
15024 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15025 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15026 return;
15027 }
15028 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15029 }
15030 fallthrough;
15031 case BNXT_FW_RESET_STATE_RESET_FW:
15032 bnxt_reset_all(bp);
15033 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15034 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15035 return;
15036 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15037 bnxt_inv_fw_health_reg(bp);
15038 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15039 !bp->fw_reset_min_dsecs) {
15040 u16 val;
15041
15042 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15043 if (val == 0xffff) {
15044 if (bnxt_fw_reset_timeout(bp)) {
15045 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15046 rc = -ETIMEDOUT;
15047 goto fw_reset_abort;
15048 }
15049 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15050 return;
15051 }
15052 }
15053 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15054 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15055 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15056 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15057 bnxt_dl_remote_reload(bp);
15058 if (pci_enable_device(bp->pdev)) {
15059 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15060 rc = -ENODEV;
15061 goto fw_reset_abort;
15062 }
15063 pci_set_master(bp->pdev);
15064 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15065 fallthrough;
15066 case BNXT_FW_RESET_STATE_POLL_FW:
15067 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15068 rc = bnxt_hwrm_poll(bp);
15069 if (rc) {
15070 if (bnxt_fw_reset_timeout(bp)) {
15071 netdev_err(bp->dev, "Firmware reset aborted\n");
15072 goto fw_reset_abort_status;
15073 }
15074 bnxt_queue_fw_reset_work(bp, HZ / 5);
15075 return;
15076 }
15077 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15078 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15079 fallthrough;
15080 case BNXT_FW_RESET_STATE_OPENING:
15081 while (!netdev_trylock(bp->dev)) {
15082 bnxt_queue_fw_reset_work(bp, HZ / 10);
15083 return;
15084 }
15085 rc = bnxt_open(bp->dev);
15086 if (rc) {
15087 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15088 bnxt_fw_reset_abort(bp, rc);
15089 netdev_unlock(bp->dev);
15090 goto ulp_start;
15091 }
15092
15093 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15094 bp->fw_health->enabled) {
15095 bp->fw_health->last_fw_reset_cnt =
15096 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15097 }
15098 bp->fw_reset_state = 0;
15099 /* Make sure fw_reset_state is 0 before clearing the flag */
15100 smp_mb__before_atomic();
15101 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15102 bnxt_ptp_reapply_pps(bp);
15103 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15104 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15105 bnxt_dl_health_fw_recovery_done(bp);
15106 bnxt_dl_health_fw_status_update(bp, true);
15107 }
15108 netdev_unlock(bp->dev);
15109 bnxt_ulp_start(bp, 0);
15110 bnxt_reenable_sriov(bp);
15111 netdev_lock(bp->dev);
15112 bnxt_vf_reps_alloc(bp);
15113 bnxt_vf_reps_open(bp);
15114 netdev_unlock(bp->dev);
15115 break;
15116 }
15117 return;
15118
15119 fw_reset_abort_status:
15120 if (bp->fw_health->status_reliable ||
15121 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15122 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15123
15124 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15125 }
15126 fw_reset_abort:
15127 netdev_lock(bp->dev);
15128 bnxt_fw_reset_abort(bp, rc);
15129 netdev_unlock(bp->dev);
15130 ulp_start:
15131 bnxt_ulp_start(bp, rc);
15132 }
15133
15134 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15135 {
15136 int rc;
15137 struct bnxt *bp = netdev_priv(dev);
15138
15139 SET_NETDEV_DEV(dev, &pdev->dev);
15140
15141 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15142 rc = pci_enable_device(pdev);
15143 if (rc) {
15144 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15145 goto init_err;
15146 }
15147
15148 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15149 dev_err(&pdev->dev,
15150 "Cannot find PCI device base address, aborting\n");
15151 rc = -ENODEV;
15152 goto init_err_disable;
15153 }
15154
15155 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15156 if (rc) {
15157 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15158 goto init_err_disable;
15159 }
15160
15161 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15162 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15163 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15164 rc = -EIO;
15165 goto init_err_release;
15166 }
15167
15168 pci_set_master(pdev);
15169
15170 bp->dev = dev;
15171 bp->pdev = pdev;
15172
15173 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15174 * determines the BAR size.
15175 */
15176 bp->bar0 = pci_ioremap_bar(pdev, 0);
15177 if (!bp->bar0) {
15178 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15179 rc = -ENOMEM;
15180 goto init_err_release;
15181 }
15182
15183 bp->bar2 = pci_ioremap_bar(pdev, 4);
15184 if (!bp->bar2) {
15185 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15186 rc = -ENOMEM;
15187 goto init_err_release;
15188 }
15189
15190 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15191 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15192
15193 spin_lock_init(&bp->ntp_fltr_lock);
15194 #if BITS_PER_LONG == 32
15195 spin_lock_init(&bp->db_lock);
15196 #endif
15197
15198 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15199 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15200
15201 timer_setup(&bp->timer, bnxt_timer, 0);
15202 bp->current_interval = BNXT_TIMER_INTERVAL;
15203
15204 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15205 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15206
15207 clear_bit(BNXT_STATE_OPEN, &bp->state);
15208 return 0;
15209
15210 init_err_release:
15211 bnxt_unmap_bars(bp, pdev);
15212 pci_release_regions(pdev);
15213
15214 init_err_disable:
15215 pci_disable_device(pdev);
15216
15217 init_err:
15218 return rc;
15219 }
15220
15221 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15222 {
15223 struct sockaddr *addr = p;
15224 struct bnxt *bp = netdev_priv(dev);
15225 int rc = 0;
15226
15227 netdev_assert_locked(dev);
15228
15229 if (!is_valid_ether_addr(addr->sa_data))
15230 return -EADDRNOTAVAIL;
15231
15232 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15233 return 0;
15234
15235 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15236 if (rc)
15237 return rc;
15238
15239 eth_hw_addr_set(dev, addr->sa_data);
15240 bnxt_clear_usr_fltrs(bp, true);
15241 if (netif_running(dev)) {
15242 bnxt_close_nic(bp, false, false);
15243 rc = bnxt_open_nic(bp, false, false);
15244 }
15245
15246 return rc;
15247 }
15248
15249 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15250 {
15251 struct bnxt *bp = netdev_priv(dev);
15252
15253 netdev_assert_locked(dev);
15254
15255 if (netif_running(dev))
15256 bnxt_close_nic(bp, true, false);
15257
15258 WRITE_ONCE(dev->mtu, new_mtu);
15259
15260 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15261 * program is attached. We need to set the AGG ring settings and
15262 * rx_skb_func accordingly.
15263 */
15264 if (READ_ONCE(bp->xdp_prog))
15265 bnxt_set_rx_skb_mode(bp, true);
15266
15267 bnxt_set_ring_params(bp);
15268
15269 if (netif_running(dev))
15270 return bnxt_open_nic(bp, true, false);
15271
15272 return 0;
15273 }
15274
15275 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15276 {
15277 struct bnxt *bp = netdev_priv(dev);
15278 bool sh = false;
15279 int rc, tx_cp;
15280
15281 if (tc > bp->max_tc) {
15282 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15283 tc, bp->max_tc);
15284 return -EINVAL;
15285 }
15286
15287 if (bp->num_tc == tc)
15288 return 0;
15289
15290 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15291 sh = true;
15292
15293 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15294 sh, tc, bp->tx_nr_rings_xdp);
15295 if (rc)
15296 return rc;
15297
15298 /* Need to close the device and re-allocate HW resources */
15299 if (netif_running(bp->dev))
15300 bnxt_close_nic(bp, true, false);
15301
15302 if (tc) {
15303 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15304 netdev_set_num_tc(dev, tc);
15305 bp->num_tc = tc;
15306 } else {
15307 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15308 netdev_reset_tc(dev);
15309 bp->num_tc = 0;
15310 }
15311 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15312 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15313 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15314 tx_cp + bp->rx_nr_rings;
15315
15316 if (netif_running(bp->dev))
15317 return bnxt_open_nic(bp, true, false);
15318
15319 return 0;
15320 }
15321
15322 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15323 void *cb_priv)
15324 {
15325 struct bnxt *bp = cb_priv;
15326
15327 if (!bnxt_tc_flower_enabled(bp) ||
15328 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15329 return -EOPNOTSUPP;
15330
15331 switch (type) {
15332 case TC_SETUP_CLSFLOWER:
15333 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15334 default:
15335 return -EOPNOTSUPP;
15336 }
15337 }
15338
15339 LIST_HEAD(bnxt_block_cb_list);
15340
15341 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15342 void *type_data)
15343 {
15344 struct bnxt *bp = netdev_priv(dev);
15345
15346 switch (type) {
15347 case TC_SETUP_BLOCK:
15348 return flow_block_cb_setup_simple(type_data,
15349 &bnxt_block_cb_list,
15350 bnxt_setup_tc_block_cb,
15351 bp, bp, true);
15352 case TC_SETUP_QDISC_MQPRIO: {
15353 struct tc_mqprio_qopt *mqprio = type_data;
15354
15355 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15356
15357 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15358 }
15359 default:
15360 return -EOPNOTSUPP;
15361 }
15362 }
15363
15364 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15365 const struct sk_buff *skb)
15366 {
15367 struct bnxt_vnic_info *vnic;
15368
15369 if (skb)
15370 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15371
15372 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15373 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15374 }
15375
15376 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15377 u32 idx)
15378 {
15379 struct hlist_head *head;
15380 int bit_id;
15381
15382 spin_lock_bh(&bp->ntp_fltr_lock);
15383 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15384 if (bit_id < 0) {
15385 spin_unlock_bh(&bp->ntp_fltr_lock);
15386 return -ENOMEM;
15387 }
15388
15389 fltr->base.sw_id = (u16)bit_id;
15390 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15391 fltr->base.flags |= BNXT_ACT_RING_DST;
15392 head = &bp->ntp_fltr_hash_tbl[idx];
15393 hlist_add_head_rcu(&fltr->base.hash, head);
15394 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15395 bnxt_insert_usr_fltr(bp, &fltr->base);
15396 bp->ntp_fltr_count++;
15397 spin_unlock_bh(&bp->ntp_fltr_lock);
15398 return 0;
15399 }
15400
15401 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15402 struct bnxt_ntuple_filter *f2)
15403 {
15404 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15405 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15406 struct flow_keys *keys1 = &f1->fkeys;
15407 struct flow_keys *keys2 = &f2->fkeys;
15408
15409 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15410 keys1->basic.ip_proto != keys2->basic.ip_proto)
15411 return false;
15412
15413 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15414 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15415 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15416 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15417 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15418 return false;
15419 } else {
15420 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15421 &keys2->addrs.v6addrs.src) ||
15422 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15423 &masks2->addrs.v6addrs.src) ||
15424 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15425 &keys2->addrs.v6addrs.dst) ||
15426 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15427 &masks2->addrs.v6addrs.dst))
15428 return false;
15429 }
15430
15431 return keys1->ports.src == keys2->ports.src &&
15432 masks1->ports.src == masks2->ports.src &&
15433 keys1->ports.dst == keys2->ports.dst &&
15434 masks1->ports.dst == masks2->ports.dst &&
15435 keys1->control.flags == keys2->control.flags &&
15436 f1->l2_fltr == f2->l2_fltr;
15437 }
15438
15439 struct bnxt_ntuple_filter *
15440 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15441 struct bnxt_ntuple_filter *fltr, u32 idx)
15442 {
15443 struct bnxt_ntuple_filter *f;
15444 struct hlist_head *head;
15445
15446 head = &bp->ntp_fltr_hash_tbl[idx];
15447 hlist_for_each_entry_rcu(f, head, base.hash) {
15448 if (bnxt_fltr_match(f, fltr))
15449 return f;
15450 }
15451 return NULL;
15452 }
15453
15454 #ifdef CONFIG_RFS_ACCEL
15455 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15456 u16 rxq_index, u32 flow_id)
15457 {
15458 struct bnxt *bp = netdev_priv(dev);
15459 struct bnxt_ntuple_filter *fltr, *new_fltr;
15460 struct flow_keys *fkeys;
15461 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15462 struct bnxt_l2_filter *l2_fltr;
15463 int rc = 0, idx;
15464 u32 flags;
15465
15466 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15467 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15468 atomic_inc(&l2_fltr->refcnt);
15469 } else {
15470 struct bnxt_l2_key key;
15471
15472 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15473 key.vlan = 0;
15474 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15475 if (!l2_fltr)
15476 return -EINVAL;
15477 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15478 bnxt_del_l2_filter(bp, l2_fltr);
15479 return -EINVAL;
15480 }
15481 }
15482 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15483 if (!new_fltr) {
15484 bnxt_del_l2_filter(bp, l2_fltr);
15485 return -ENOMEM;
15486 }
15487
15488 fkeys = &new_fltr->fkeys;
15489 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15490 rc = -EPROTONOSUPPORT;
15491 goto err_free;
15492 }
15493
15494 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15495 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15496 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15497 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15498 rc = -EPROTONOSUPPORT;
15499 goto err_free;
15500 }
15501 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15502 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15503 if (bp->hwrm_spec_code < 0x10601) {
15504 rc = -EPROTONOSUPPORT;
15505 goto err_free;
15506 }
15507 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15508 }
15509 flags = fkeys->control.flags;
15510 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15511 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15512 rc = -EPROTONOSUPPORT;
15513 goto err_free;
15514 }
15515 new_fltr->l2_fltr = l2_fltr;
15516
15517 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15518 rcu_read_lock();
15519 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15520 if (fltr) {
15521 rc = fltr->base.sw_id;
15522 rcu_read_unlock();
15523 goto err_free;
15524 }
15525 rcu_read_unlock();
15526
15527 new_fltr->flow_id = flow_id;
15528 new_fltr->base.rxq = rxq_index;
15529 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15530 if (!rc) {
15531 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15532 return new_fltr->base.sw_id;
15533 }
15534
15535 err_free:
15536 bnxt_del_l2_filter(bp, l2_fltr);
15537 kfree(new_fltr);
15538 return rc;
15539 }
15540 #endif
15541
15542 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15543 {
15544 spin_lock_bh(&bp->ntp_fltr_lock);
15545 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15546 spin_unlock_bh(&bp->ntp_fltr_lock);
15547 return;
15548 }
15549 hlist_del_rcu(&fltr->base.hash);
15550 bnxt_del_one_usr_fltr(bp, &fltr->base);
15551 bp->ntp_fltr_count--;
15552 spin_unlock_bh(&bp->ntp_fltr_lock);
15553 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15554 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15555 kfree_rcu(fltr, base.rcu);
15556 }
15557
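/* Walk the aRFS filter hash table: program into hardware any filter that is
 * not yet marked BNXT_FLTR_VALID, and free filters whose flows the stack
 * reports as expired (unless BNXT_ACT_NO_AGING is set).  Runs from
 * bnxt_sp_task() on BNXT_RX_NTP_FLTR_SP_EVENT.
 */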
15558 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15559 {
15560 #ifdef CONFIG_RFS_ACCEL
15561 int i;
15562
15563 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15564 struct hlist_head *head;
15565 struct hlist_node *tmp;
15566 struct bnxt_ntuple_filter *fltr;
15567 int rc;
15568
15569 head = &bp->ntp_fltr_hash_tbl[i];
15570 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15571 bool del = false;
15572
15573 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15574 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15575 continue;
15576 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15577 fltr->flow_id,
15578 fltr->base.sw_id)) {
15579 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15580 fltr);
15581 del = true;
15582 }
15583 } else {
15584 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15585 fltr);
15586 if (rc)
15587 del = true;
15588 else
15589 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15590 }
15591
15592 if (del)
15593 bnxt_del_ntp_filter(bp, fltr);
15594 }
15595 }
15596 #endif
15597 }
15598
15599 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15600 unsigned int entry, struct udp_tunnel_info *ti)
15601 {
15602 struct bnxt *bp = netdev_priv(netdev);
15603 unsigned int cmd;
15604
15605 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15606 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15607 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15608 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15609 else
15610 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15611
15612 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15613 }
15614
15615 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15616 unsigned int entry, struct udp_tunnel_info *ti)
15617 {
15618 struct bnxt *bp = netdev_priv(netdev);
15619 unsigned int cmd;
15620
15621 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15622 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15623 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15624 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15625 else
15626 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15627
15628 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15629 }
15630
15631 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15632 .set_port = bnxt_udp_tunnel_set_port,
15633 .unset_port = bnxt_udp_tunnel_unset_port,
15634 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15635 .tables = {
15636 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15637 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15638 },
15639 }, bnxt_udp_tunnels_p7 = {
15640 .set_port = bnxt_udp_tunnel_set_port,
15641 .unset_port = bnxt_udp_tunnel_unset_port,
15642 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15643 .tables = {
15644 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15645 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15646 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15647 },
15648 };
15649
15650 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15651 struct net_device *dev, u32 filter_mask,
15652 int nlflags)
15653 {
15654 struct bnxt *bp = netdev_priv(dev);
15655
15656 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15657 nlflags, filter_mask, NULL);
15658 }
15659
15660 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15661 u16 flags, struct netlink_ext_ack *extack)
15662 {
15663 struct bnxt *bp = netdev_priv(dev);
15664 struct nlattr *attr, *br_spec;
15665 int rem, rc = 0;
15666
15667 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15668 return -EOPNOTSUPP;
15669
15670 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15671 if (!br_spec)
15672 return -EINVAL;
15673
15674 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15675 u16 mode;
15676
15677 mode = nla_get_u16(attr);
15678 if (mode == bp->br_mode)
15679 break;
15680
15681 rc = bnxt_hwrm_set_br_mode(bp, mode);
15682 if (!rc)
15683 bp->br_mode = mode;
15684 break;
15685 }
15686 return rc;
15687 }
15688
15689 int bnxt_get_port_parent_id(struct net_device *dev,
15690 struct netdev_phys_item_id *ppid)
15691 {
15692 struct bnxt *bp = netdev_priv(dev);
15693
15694 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15695 return -EOPNOTSUPP;
15696
15697 /* The PF and its VF-reps only support the switchdev framework */
15698 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15699 return -EOPNOTSUPP;
15700
15701 ppid->id_len = sizeof(bp->dsn);
15702 memcpy(ppid->id, bp->dsn, ppid->id_len);
15703
15704 return 0;
15705 }
15706
15707 static const struct net_device_ops bnxt_netdev_ops = {
15708 .ndo_open = bnxt_open,
15709 .ndo_start_xmit = bnxt_start_xmit,
15710 .ndo_stop = bnxt_close,
15711 .ndo_get_stats64 = bnxt_get_stats64,
15712 .ndo_set_rx_mode = bnxt_set_rx_mode,
15713 .ndo_eth_ioctl = bnxt_ioctl,
15714 .ndo_validate_addr = eth_validate_addr,
15715 .ndo_set_mac_address = bnxt_change_mac_addr,
15716 .ndo_change_mtu = bnxt_change_mtu,
15717 .ndo_fix_features = bnxt_fix_features,
15718 .ndo_set_features = bnxt_set_features,
15719 .ndo_features_check = bnxt_features_check,
15720 .ndo_tx_timeout = bnxt_tx_timeout,
15721 #ifdef CONFIG_BNXT_SRIOV
15722 .ndo_get_vf_config = bnxt_get_vf_config,
15723 .ndo_set_vf_mac = bnxt_set_vf_mac,
15724 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15725 .ndo_set_vf_rate = bnxt_set_vf_bw,
15726 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15727 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15728 .ndo_set_vf_trust = bnxt_set_vf_trust,
15729 #endif
15730 .ndo_setup_tc = bnxt_setup_tc,
15731 #ifdef CONFIG_RFS_ACCEL
15732 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15733 #endif
15734 .ndo_bpf = bnxt_xdp,
15735 .ndo_xdp_xmit = bnxt_xdp_xmit,
15736 .ndo_bridge_getlink = bnxt_bridge_getlink,
15737 .ndo_bridge_setlink = bnxt_bridge_setlink,
15738 };
15739
15740 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15741 struct netdev_queue_stats_rx *stats)
15742 {
15743 struct bnxt *bp = netdev_priv(dev);
15744 struct bnxt_cp_ring_info *cpr;
15745 u64 *sw;
15746
15747 if (!bp->bnapi)
15748 return;
15749
15750 cpr = &bp->bnapi[i]->cp_ring;
15751 sw = cpr->stats.sw_stats;
15752
15753 stats->packets = 0;
15754 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15755 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15756 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15757
15758 stats->bytes = 0;
15759 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15760 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15761 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15762
15763 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15764 }
15765
15766 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15767 struct netdev_queue_stats_tx *stats)
15768 {
15769 struct bnxt *bp = netdev_priv(dev);
15770 struct bnxt_napi *bnapi;
15771 u64 *sw;
15772
15773 if (!bp->tx_ring)
15774 return;
15775
15776 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15777 sw = bnapi->cp_ring.stats.sw_stats;
15778
15779 stats->packets = 0;
15780 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15781 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15782 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15783
15784 stats->bytes = 0;
15785 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15786 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15787 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15788 }
15789
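/* Base (device-wide) statistics: report the previously accumulated
 * totals kept in net_stats_prev / ring_err_stats_prev so that counters
 * are not lost across ring reconfiguration.
 */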
15790 static void bnxt_get_base_stats(struct net_device *dev,
15791 struct netdev_queue_stats_rx *rx,
15792 struct netdev_queue_stats_tx *tx)
15793 {
15794 struct bnxt *bp = netdev_priv(dev);
15795
15796 rx->packets = bp->net_stats_prev.rx_packets;
15797 rx->bytes = bp->net_stats_prev.rx_bytes;
15798 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15799
15800 tx->packets = bp->net_stats_prev.tx_packets;
15801 tx->bytes = bp->net_stats_prev.tx_bytes;
15802 }
15803
15804 static const struct netdev_stat_ops bnxt_stat_ops = {
15805 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15806 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15807 .get_base_stats = bnxt_get_base_stats,
15808 };
15809
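/* Queue management op: build a standalone clone of RX ring @idx in @qmem
 * with its own page pools, XDP rxq info, ring descriptor memory,
 * aggregation bitmap and TPA state.  The clone is not yet known to the
 * hardware.
 */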
15810 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15811 {
15812 struct bnxt_rx_ring_info *rxr, *clone;
15813 struct bnxt *bp = netdev_priv(dev);
15814 struct bnxt_ring_struct *ring;
15815 int rc;
15816
15817 if (!bp->rx_ring)
15818 return -ENETDOWN;
15819
15820 rxr = &bp->rx_ring[idx];
15821 clone = qmem;
15822 memcpy(clone, rxr, sizeof(*rxr));
15823 bnxt_init_rx_ring_struct(bp, clone);
15824 bnxt_reset_rx_ring_struct(bp, clone);
15825
15826 clone->rx_prod = 0;
15827 clone->rx_agg_prod = 0;
15828 clone->rx_sw_agg_prod = 0;
15829 clone->rx_next_cons = 0;
15830 clone->need_head_pool = false;
15831
15832 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15833 if (rc)
15834 return rc;
15835
15836 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15837 if (rc < 0)
15838 goto err_page_pool_destroy;
15839
15840 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15841 MEM_TYPE_PAGE_POOL,
15842 clone->page_pool);
15843 if (rc)
15844 goto err_rxq_info_unreg;
15845
15846 ring = &clone->rx_ring_struct;
15847 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15848 if (rc)
15849 goto err_free_rx_ring;
15850
15851 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15852 ring = &clone->rx_agg_ring_struct;
15853 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15854 if (rc)
15855 goto err_free_rx_agg_ring;
15856
15857 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15858 if (rc)
15859 goto err_free_rx_agg_ring;
15860 }
15861
15862 if (bp->flags & BNXT_FLAG_TPA) {
15863 rc = bnxt_alloc_one_tpa_info(bp, clone);
15864 if (rc)
15865 goto err_free_tpa_info;
15866 }
15867
15868 bnxt_init_one_rx_ring_rxbd(bp, clone);
15869 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15870
15871 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15872 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15873 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
15874 if (bp->flags & BNXT_FLAG_TPA)
15875 bnxt_alloc_one_tpa_info_data(bp, clone);
15876
15877 return 0;
15878
15879 err_free_tpa_info:
15880 bnxt_free_one_tpa_info(bp, clone);
15881 err_free_rx_agg_ring:
15882 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15883 err_free_rx_ring:
15884 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15885 err_rxq_info_unreg:
15886 xdp_rxq_info_unreg(&clone->xdp_rxq);
15887 err_page_pool_destroy:
15888 page_pool_destroy(clone->page_pool);
15889 if (bnxt_separate_head_pool(clone))
15890 page_pool_destroy(clone->head_pool);
15891 clone->page_pool = NULL;
15892 clone->head_pool = NULL;
15893 return rc;
15894 }
15895
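/* Queue management op: free the SKBs, TPA state, page pools, XDP rxq
 * info, ring memory and aggregation bitmap held by an RX ring clone.
 */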
15896 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15897 {
15898 struct bnxt_rx_ring_info *rxr = qmem;
15899 struct bnxt *bp = netdev_priv(dev);
15900 struct bnxt_ring_struct *ring;
15901
15902 bnxt_free_one_rx_ring_skbs(bp, rxr);
15903 bnxt_free_one_tpa_info(bp, rxr);
15904
15905 xdp_rxq_info_unreg(&rxr->xdp_rxq);
15906
15907 page_pool_destroy(rxr->page_pool);
15908 if (bnxt_separate_head_pool(rxr))
15909 page_pool_destroy(rxr->head_pool);
15910 rxr->page_pool = NULL;
15911 rxr->head_pool = NULL;
15912
15913 ring = &rxr->rx_ring_struct;
15914 bnxt_free_ring(bp, &ring->ring_mem);
15915
15916 ring = &rxr->rx_agg_ring_struct;
15917 bnxt_free_ring(bp, &ring->ring_mem);
15918
15919 kfree(rxr->rx_agg_bmap);
15920 rxr->rx_agg_bmap = NULL;
15921 }
15922
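/* Copy the ring memory pointers (page table, vmem and per-page arrays)
 * from @src to @dst.  Both rings must have been allocated with identical
 * parameters; the WARN_ONs flag any mismatch.
 */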
15923 static void bnxt_copy_rx_ring(struct bnxt *bp,
15924 struct bnxt_rx_ring_info *dst,
15925 struct bnxt_rx_ring_info *src)
15926 {
15927 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
15928 struct bnxt_ring_struct *dst_ring, *src_ring;
15929 int i;
15930
15931 dst_ring = &dst->rx_ring_struct;
15932 dst_rmem = &dst_ring->ring_mem;
15933 src_ring = &src->rx_ring_struct;
15934 src_rmem = &src_ring->ring_mem;
15935
15936 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15937 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15938 WARN_ON(dst_rmem->flags != src_rmem->flags);
15939 WARN_ON(dst_rmem->depth != src_rmem->depth);
15940 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15941 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15942
15943 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15944 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15945 *dst_rmem->vmem = *src_rmem->vmem;
15946 for (i = 0; i < dst_rmem->nr_pages; i++) {
15947 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15948 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15949 }
15950
15951 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
15952 return;
15953
15954 dst_ring = &dst->rx_agg_ring_struct;
15955 dst_rmem = &dst_ring->ring_mem;
15956 src_ring = &src->rx_agg_ring_struct;
15957 src_rmem = &src_ring->ring_mem;
15958
15959 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15960 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15961 WARN_ON(dst_rmem->flags != src_rmem->flags);
15962 WARN_ON(dst_rmem->depth != src_rmem->depth);
15963 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15964 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15965 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
15966
15967 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15968 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15969 *dst_rmem->vmem = *src_rmem->vmem;
15970 for (i = 0; i < dst_rmem->nr_pages; i++) {
15971 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15972 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15973 }
15974
15975 dst->rx_agg_bmap = src->rx_agg_bmap;
15976 }
15977
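/* Queue management op: make the pre-allocated clone in @qmem the live
 * state of RX ring @idx, re-create the hardware rings via HWRM,
 * re-enable NAPI and restore the VNIC MRU settings.
 */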
15978 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
15979 {
15980 struct bnxt *bp = netdev_priv(dev);
15981 struct bnxt_rx_ring_info *rxr, *clone;
15982 struct bnxt_cp_ring_info *cpr;
15983 struct bnxt_vnic_info *vnic;
15984 struct bnxt_napi *bnapi;
15985 int i, rc;
15986 u16 mru;
15987
15988 rxr = &bp->rx_ring[idx];
15989 clone = qmem;
15990
15991 rxr->rx_prod = clone->rx_prod;
15992 rxr->rx_agg_prod = clone->rx_agg_prod;
15993 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
15994 rxr->rx_next_cons = clone->rx_next_cons;
15995 rxr->rx_tpa = clone->rx_tpa;
15996 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
15997 rxr->page_pool = clone->page_pool;
15998 rxr->head_pool = clone->head_pool;
15999 rxr->xdp_rxq = clone->xdp_rxq;
16000 rxr->need_head_pool = clone->need_head_pool;
16001
16002 bnxt_copy_rx_ring(bp, rxr, clone);
16003
16004 bnapi = rxr->bnapi;
16005 cpr = &bnapi->cp_ring;
16006
16007 /* All rings have been reserved and previously allocated.
16008 * Reallocating with the same parameters should never fail.
16009 */
16010 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16011 if (rc)
16012 goto err_reset;
16013
16014 if (bp->tph_mode) {
16015 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16016 if (rc)
16017 goto err_reset;
16018 }
16019
16020 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16021 if (rc)
16022 goto err_reset;
16023
16024 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16025 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16026 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16027
16028 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16029 rc = bnxt_tx_queue_start(bp, idx);
16030 if (rc)
16031 goto err_reset;
16032 }
16033
16034 napi_enable_locked(&bnapi->napi);
16035 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16036
16037 mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
16038 for (i = 0; i < bp->nr_vnics; i++) {
16039 vnic = &bp->vnic_info[i];
16040
16041 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16042 if (rc)
16043 return rc;
16044 }
16045 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16046
16047 err_reset:
16048 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16049 rc);
16050 napi_enable_locked(&bnapi->napi);
16051 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16052 bnxt_reset_task(bp, true);
16053 return rc;
16054 }
16055
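/* Queue management op: quiesce RX ring @idx.  The VNIC MRU is cleared
 * first so no new traffic targets the ring, then the hardware rings are
 * freed, NAPI is disabled and the current ring state is saved in @qmem.
 */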
16056 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16057 {
16058 struct bnxt *bp = netdev_priv(dev);
16059 struct bnxt_rx_ring_info *rxr;
16060 struct bnxt_cp_ring_info *cpr;
16061 struct bnxt_vnic_info *vnic;
16062 struct bnxt_napi *bnapi;
16063 int i;
16064
16065 for (i = 0; i < bp->nr_vnics; i++) {
16066 vnic = &bp->vnic_info[i];
16067
16068 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16069 }
16070 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16071 /* Make sure NAPI sees that the VNIC is disabled */
16072 synchronize_net();
16073 rxr = &bp->rx_ring[idx];
16074 bnapi = rxr->bnapi;
16075 cpr = &bnapi->cp_ring;
16076 cancel_work_sync(&cpr->dim.work);
16077 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16078 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16079 page_pool_disable_direct_recycling(rxr->page_pool);
16080 if (bnxt_separate_head_pool(rxr))
16081 page_pool_disable_direct_recycling(rxr->head_pool);
16082
16083 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16084 bnxt_tx_queue_stop(bp, idx);
16085
16086 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16087 * completion is handled in NAPI to guarantee no more DMA on that ring
16088 * after seeing the completion.
16089 */
16090 napi_disable_locked(&bnapi->napi);
16091
16092 if (bp->tph_mode) {
16093 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16094 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16095 }
16096 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16097
16098 memcpy(qmem, rxr, sizeof(*rxr));
16099 bnxt_init_rx_ring_struct(bp, qmem);
16100
16101 return 0;
16102 }
16103
16104 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16105 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16106 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16107 .ndo_queue_mem_free = bnxt_queue_mem_free,
16108 .ndo_queue_start = bnxt_queue_start,
16109 .ndo_queue_stop = bnxt_queue_stop,
16110 };
16111
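/* PCI remove callback: disable SR-IOV, remove the RDMA auxiliary device,
 * unregister the netdev and release all remaining driver resources.
 */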
16112 static void bnxt_remove_one(struct pci_dev *pdev)
16113 {
16114 struct net_device *dev = pci_get_drvdata(pdev);
16115 struct bnxt *bp = netdev_priv(dev);
16116
16117 if (BNXT_PF(bp))
16118 bnxt_sriov_disable(bp);
16119
16120 bnxt_rdma_aux_device_del(bp);
16121
16122 unregister_netdev(dev);
16123 bnxt_ptp_clear(bp);
16124
16125 bnxt_rdma_aux_device_uninit(bp);
16126
16127 bnxt_free_l2_filters(bp, true);
16128 bnxt_free_ntp_fltrs(bp, true);
16129 WARN_ON(bp->num_rss_ctx);
16130 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16131 /* Flush any pending tasks */
16132 cancel_work_sync(&bp->sp_task);
16133 cancel_delayed_work_sync(&bp->fw_reset_task);
16134 bp->sp_event = 0;
16135
16136 bnxt_dl_fw_reporters_destroy(bp);
16137 bnxt_dl_unregister(bp);
16138 bnxt_shutdown_tc(bp);
16139
16140 bnxt_clear_int_mode(bp);
16141 bnxt_hwrm_func_drv_unrgtr(bp);
16142 bnxt_free_hwrm_resources(bp);
16143 bnxt_hwmon_uninit(bp);
16144 bnxt_ethtool_free(bp);
16145 bnxt_dcb_free(bp);
16146 kfree(bp->ptp_cfg);
16147 bp->ptp_cfg = NULL;
16148 kfree(bp->fw_health);
16149 bp->fw_health = NULL;
16150 bnxt_cleanup_pci(bp);
16151 bnxt_free_ctx_mem(bp, true);
16152 bnxt_free_crash_dump_mem(bp);
16153 kfree(bp->rss_indir_tbl);
16154 bp->rss_indir_tbl = NULL;
16155 bnxt_free_port_stats(bp);
16156 free_netdev(dev);
16157 }
16158
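/* Query PHY and MAC capabilities from firmware and, if @fw_dflt is set,
 * initialize the link settings from the current firmware state.
 */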
16159 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16160 {
16161 int rc = 0;
16162 struct bnxt_link_info *link_info = &bp->link_info;
16163
16164 bp->phy_flags = 0;
16165 rc = bnxt_hwrm_phy_qcaps(bp);
16166 if (rc) {
16167 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16168 rc);
16169 return rc;
16170 }
16171 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16172 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16173 else
16174 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16175
16176 bp->mac_flags = 0;
16177 bnxt_hwrm_mac_qcaps(bp);
16178
16179 if (!fw_dflt)
16180 return 0;
16181
16182 mutex_lock(&bp->link_lock);
16183 rc = bnxt_update_link(bp, false);
16184 if (rc) {
16185 mutex_unlock(&bp->link_lock);
16186 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16187 rc);
16188 return rc;
16189 }
16190
16191 /* Older firmware does not have supported_auto_speeds, so assume
16192 * that all supported speeds can be autonegotiated.
16193 */
16194 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16195 link_info->support_auto_speeds = link_info->support_speeds;
16196
16197 bnxt_init_ethtool_link_settings(bp);
16198 mutex_unlock(&bp->link_lock);
16199 return 0;
16200 }
16201
16202 static int bnxt_get_max_irq(struct pci_dev *pdev)
16203 {
16204 u16 ctrl;
16205
16206 if (!pdev->msix_cap)
16207 return 1;
16208
16209 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16210 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16211 }
16212
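/* Compute the maximum usable RX/TX/completion rings from the resources
 * reported by firmware, accounting for ULP usage, aggregation rings and
 * chip-specific limits.
 */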
16213 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16214 int *max_cp)
16215 {
16216 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16217 int max_ring_grps = 0, max_irq;
16218
16219 *max_tx = hw_resc->max_tx_rings;
16220 *max_rx = hw_resc->max_rx_rings;
16221 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16222 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16223 bnxt_get_ulp_msix_num_in_use(bp),
16224 hw_resc->max_stat_ctxs -
16225 bnxt_get_ulp_stat_ctxs_in_use(bp));
16226 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16227 *max_cp = min_t(int, *max_cp, max_irq);
16228 max_ring_grps = hw_resc->max_hw_ring_grps;
16229 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16230 *max_cp -= 1;
16231 *max_rx -= 2;
16232 }
16233 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16234 *max_rx >>= 1;
16235 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16236 int rc;
16237
16238 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16239 if (rc) {
16240 *max_rx = 0;
16241 *max_tx = 0;
16242 }
16243 /* On P5 chips, max_cp output param should be available NQs */
16244 *max_cp = max_irq;
16245 }
16246 *max_rx = min_t(int, *max_rx, max_ring_grps);
16247 }
16248
16249 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16250 {
16251 int rx, tx, cp;
16252
16253 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16254 *max_rx = rx;
16255 *max_tx = tx;
16256 if (!rx || !tx || !cp)
16257 return -ENOMEM;
16258
16259 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16260 }
16261
16262 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16263 bool shared)
16264 {
16265 int rc;
16266
16267 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16268 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16269 /* Not enough rings, try disabling agg rings. */
16270 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16271 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16272 if (rc) {
16273 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16274 bp->flags |= BNXT_FLAG_AGG_RINGS;
16275 return rc;
16276 }
16277 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16278 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16279 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16280 bnxt_set_ring_params(bp);
16281 }
16282
16283 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16284 int max_cp, max_stat, max_irq;
16285
16286 /* Reserve minimum resources for RoCE */
16287 max_cp = bnxt_get_max_func_cp_rings(bp);
16288 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16289 max_irq = bnxt_get_max_func_irqs(bp);
16290 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16291 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16292 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16293 return 0;
16294
16295 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16296 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16297 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16298 max_cp = min_t(int, max_cp, max_irq);
16299 max_cp = min_t(int, max_cp, max_stat);
16300 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16301 if (rc)
16302 rc = 0;
16303 }
16304 return rc;
16305 }
16306
16307 /* In the initial default shared ring setting, each shared ring must have
16308 * an RX/TX ring pair.
16309 */
16310 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16311 {
16312 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16313 bp->rx_nr_rings = bp->cp_nr_rings;
16314 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16315 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
16316 }
16317
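/* Choose default ring counts based on the CPU count, port count and
 * available hardware resources, then reserve them with firmware.
 */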
16318 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16319 {
16320 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16321 int avail_msix;
16322
16323 if (!bnxt_can_reserve_rings(bp))
16324 return 0;
16325
16326 if (sh)
16327 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16328 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16329 /* Reduce default rings on multi-port cards so that total default
16330 * rings do not exceed CPU count.
16331 */
16332 if (bp->port_count > 1) {
16333 int max_rings =
16334 max_t(int, num_online_cpus() / bp->port_count, 1);
16335
16336 dflt_rings = min_t(int, dflt_rings, max_rings);
16337 }
16338 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16339 if (rc)
16340 return rc;
16341 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16342 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16343 if (sh)
16344 bnxt_trim_dflt_sh_rings(bp);
16345 else
16346 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16347 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
16348
16349 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16350 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16351 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16352
16353 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16354 bnxt_set_dflt_ulp_stat_ctxs(bp);
16355 }
16356
16357 rc = __bnxt_reserve_rings(bp);
16358 if (rc && rc != -ENODEV)
16359 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16360 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16361 if (sh)
16362 bnxt_trim_dflt_sh_rings(bp);
16363
16364 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16365 if (bnxt_need_reserve_rings(bp)) {
16366 rc = __bnxt_reserve_rings(bp);
16367 if (rc && rc != -ENODEV)
16368 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16369 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16370 }
16371 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16372 bp->rx_nr_rings++;
16373 bp->cp_nr_rings++;
16374 }
16375 if (rc) {
16376 bp->tx_nr_rings = 0;
16377 bp->rx_nr_rings = 0;
16378 }
16379 return rc;
16380 }
16381
16382 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16383 {
16384 int rc;
16385
16386 if (bp->tx_nr_rings)
16387 return 0;
16388
16389 bnxt_ulp_irq_stop(bp);
16390 bnxt_clear_int_mode(bp);
16391 rc = bnxt_set_dflt_rings(bp, true);
16392 if (rc) {
16393 if (BNXT_VF(bp) && rc == -ENODEV)
16394 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16395 else
16396 netdev_err(bp->dev, "Not enough rings available.\n");
16397 goto init_dflt_ring_err;
16398 }
16399 rc = bnxt_init_int_mode(bp);
16400 if (rc)
16401 goto init_dflt_ring_err;
16402
16403 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16404
16405 bnxt_set_dflt_rfs(bp);
16406
16407 init_dflt_ring_err:
16408 bnxt_ulp_irq_restart(bp, rc);
16409 return rc;
16410 }
16411
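/* Re-query firmware capabilities and re-initialize the interrupt mode,
 * restarting the NIC if it was running.
 */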
16412 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16413 {
16414 int rc;
16415
16416 netdev_ops_assert_locked(bp->dev);
16417 bnxt_hwrm_func_qcaps(bp);
16418
16419 if (netif_running(bp->dev))
16420 __bnxt_close_nic(bp, true, false);
16421
16422 bnxt_ulp_irq_stop(bp);
16423 bnxt_clear_int_mode(bp);
16424 rc = bnxt_init_int_mode(bp);
16425 bnxt_ulp_irq_restart(bp, rc);
16426
16427 if (netif_running(bp->dev)) {
16428 if (rc)
16429 netif_close(bp->dev);
16430 else
16431 rc = bnxt_open_nic(bp, true, false);
16432 }
16433
16434 return rc;
16435 }
16436
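/* Set the initial MAC address: the PF uses the firmware-provided address;
 * a VF uses the PF-assigned address if valid, or a random one otherwise.
 */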
16437 static int bnxt_init_mac_addr(struct bnxt *bp)
16438 {
16439 int rc = 0;
16440
16441 if (BNXT_PF(bp)) {
16442 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16443 } else {
16444 #ifdef CONFIG_BNXT_SRIOV
16445 struct bnxt_vf_info *vf = &bp->vf;
16446 bool strict_approval = true;
16447
16448 if (is_valid_ether_addr(vf->mac_addr)) {
16449 /* overwrite netdev dev_addr with admin VF MAC */
16450 eth_hw_addr_set(bp->dev, vf->mac_addr);
16451 /* Older PF driver or firmware may not approve this
16452 * correctly.
16453 */
16454 strict_approval = false;
16455 } else {
16456 eth_hw_addr_random(bp->dev);
16457 }
16458 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16459 #endif
16460 }
16461 return rc;
16462 }
16463
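/* Read the board part number and serial number from PCI VPD. */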
16464 static void bnxt_vpd_read_info(struct bnxt *bp)
16465 {
16466 struct pci_dev *pdev = bp->pdev;
16467 unsigned int vpd_size, kw_len;
16468 int pos, size;
16469 u8 *vpd_data;
16470
16471 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16472 if (IS_ERR(vpd_data)) {
16473 pci_warn(pdev, "Unable to read VPD\n");
16474 return;
16475 }
16476
16477 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16478 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16479 if (pos < 0)
16480 goto read_sn;
16481
16482 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16483 memcpy(bp->board_partno, &vpd_data[pos], size);
16484
16485 read_sn:
16486 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16487 PCI_VPD_RO_KEYWORD_SERIALNO,
16488 &kw_len);
16489 if (pos < 0)
16490 goto exit;
16491
16492 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16493 memcpy(bp->board_serialno, &vpd_data[pos], size);
16494 exit:
16495 kfree(vpd_data);
16496 }
16497
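/* Read the PCIe Device Serial Number; it also serves as the switchdev
 * switch ID.
 */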
16498 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16499 {
16500 struct pci_dev *pdev = bp->pdev;
16501 u64 qword;
16502
16503 qword = pci_get_dsn(pdev);
16504 if (!qword) {
16505 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16506 return -EOPNOTSUPP;
16507 }
16508
16509 put_unaligned_le64(qword, dsn);
16510
16511 bp->flags |= BNXT_FLAG_DSN_VALID;
16512 return 0;
16513 }
16514
16515 static int bnxt_map_db_bar(struct bnxt *bp)
16516 {
16517 if (!bp->db_size)
16518 return -ENODEV;
16519 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16520 if (!bp->bar1)
16521 return -ENOMEM;
16522 return 0;
16523 }
16524
16525 void bnxt_print_device_info(struct bnxt *bp)
16526 {
16527 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16528 board_info[bp->board_idx].name,
16529 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16530
16531 pcie_print_link_status(bp->pdev);
16532 }
16533
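/* PCI probe callback: allocate the netdev, map device resources, query
 * firmware capabilities, set up default rings and features, and register
 * the netdev and devlink instances.
 */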
16534 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16535 {
16536 struct bnxt_hw_resc *hw_resc;
16537 struct net_device *dev;
16538 struct bnxt *bp;
16539 int rc, max_irqs;
16540
16541 if (pci_is_bridge(pdev))
16542 return -ENODEV;
16543
16544 if (!pdev->msix_cap) {
16545 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16546 return -ENODEV;
16547 }
16548
16549 /* Clear any pending DMA transactions left over from the crashed kernel
16550 * while loading the driver in the capture (kdump) kernel.
16551 */
16552 if (is_kdump_kernel()) {
16553 pci_clear_master(pdev);
16554 pcie_flr(pdev);
16555 }
16556
16557 max_irqs = bnxt_get_max_irq(pdev);
16558 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16559 max_irqs);
16560 if (!dev)
16561 return -ENOMEM;
16562
16563 bp = netdev_priv(dev);
16564 bp->board_idx = ent->driver_data;
16565 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16566 bnxt_set_max_func_irqs(bp, max_irqs);
16567
16568 if (bnxt_vf_pciid(bp->board_idx))
16569 bp->flags |= BNXT_FLAG_VF;
16570
16571 /* No devlink port registration in case of a VF */
16572 if (BNXT_PF(bp))
16573 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16574
16575 rc = bnxt_init_board(pdev, dev);
16576 if (rc < 0)
16577 goto init_err_free;
16578
16579 dev->netdev_ops = &bnxt_netdev_ops;
16580 dev->stat_ops = &bnxt_stat_ops;
16581 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16582 dev->ethtool_ops = &bnxt_ethtool_ops;
16583 pci_set_drvdata(pdev, dev);
16584
16585 rc = bnxt_alloc_hwrm_resources(bp);
16586 if (rc)
16587 goto init_err_pci_clean;
16588
16589 mutex_init(&bp->hwrm_cmd_lock);
16590 mutex_init(&bp->link_lock);
16591
16592 rc = bnxt_fw_init_one_p1(bp);
16593 if (rc)
16594 goto init_err_pci_clean;
16595
16596 if (BNXT_PF(bp))
16597 bnxt_vpd_read_info(bp);
16598
16599 if (BNXT_CHIP_P5_PLUS(bp)) {
16600 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16601 if (BNXT_CHIP_P7(bp))
16602 bp->flags |= BNXT_FLAG_CHIP_P7;
16603 }
16604
16605 rc = bnxt_alloc_rss_indir_tbl(bp);
16606 if (rc)
16607 goto init_err_pci_clean;
16608
16609 rc = bnxt_fw_init_one_p2(bp);
16610 if (rc)
16611 goto init_err_pci_clean;
16612
16613 rc = bnxt_map_db_bar(bp);
16614 if (rc) {
16615 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16616 rc);
16617 goto init_err_pci_clean;
16618 }
16619
16620 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16621 NETIF_F_TSO | NETIF_F_TSO6 |
16622 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16623 NETIF_F_GSO_IPXIP4 |
16624 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16625 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16626 NETIF_F_RXCSUM | NETIF_F_GRO;
16627 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16628 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16629
16630 if (BNXT_SUPPORTS_TPA(bp))
16631 dev->hw_features |= NETIF_F_LRO;
16632
16633 dev->hw_enc_features =
16634 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16635 NETIF_F_TSO | NETIF_F_TSO6 |
16636 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16637 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16638 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16639 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16640 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16641 if (bp->flags & BNXT_FLAG_CHIP_P7)
16642 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16643 else
16644 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16645
16646 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16647 NETIF_F_GSO_GRE_CSUM;
16648 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16649 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16650 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16651 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16652 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16653 if (BNXT_SUPPORTS_TPA(bp))
16654 dev->hw_features |= NETIF_F_GRO_HW;
16655 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16656 if (dev->features & NETIF_F_GRO_HW)
16657 dev->features &= ~NETIF_F_LRO;
16658 dev->priv_flags |= IFF_UNICAST_FLT;
16659
16660 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16661 if (bp->tso_max_segs)
16662 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16663
16664 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16665 NETDEV_XDP_ACT_RX_SG;
16666
16667 #ifdef CONFIG_BNXT_SRIOV
16668 init_waitqueue_head(&bp->sriov_cfg_wait);
16669 #endif
16670 if (BNXT_SUPPORTS_TPA(bp)) {
16671 bp->gro_func = bnxt_gro_func_5730x;
16672 if (BNXT_CHIP_P4(bp))
16673 bp->gro_func = bnxt_gro_func_5731x;
16674 else if (BNXT_CHIP_P5_PLUS(bp))
16675 bp->gro_func = bnxt_gro_func_5750x;
16676 }
16677 if (!BNXT_CHIP_P4_PLUS(bp))
16678 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16679
16680 rc = bnxt_init_mac_addr(bp);
16681 if (rc) {
16682 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16683 rc = -EADDRNOTAVAIL;
16684 goto init_err_pci_clean;
16685 }
16686
16687 if (BNXT_PF(bp)) {
16688 /* Read the adapter's DSN to use as the eswitch switch_id */
16689 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16690 }
16691
16692 /* MTU range: 60 - FW defined max */
16693 dev->min_mtu = ETH_ZLEN;
16694 dev->max_mtu = bp->max_mtu;
16695
16696 rc = bnxt_probe_phy(bp, true);
16697 if (rc)
16698 goto init_err_pci_clean;
16699
16700 hw_resc = &bp->hw_resc;
16701 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16702 BNXT_L2_FLTR_MAX_FLTR;
16703 /* Older firmware may not report these filters properly */
16704 if (bp->max_fltr < BNXT_MAX_FLTR)
16705 bp->max_fltr = BNXT_MAX_FLTR;
16706 bnxt_init_l2_fltr_tbl(bp);
16707 __bnxt_set_rx_skb_mode(bp, false);
16708 bnxt_set_tpa_flags(bp);
16709 bnxt_init_ring_params(bp);
16710 bnxt_set_ring_params(bp);
16711 bnxt_rdma_aux_device_init(bp);
16712 rc = bnxt_set_dflt_rings(bp, true);
16713 if (rc) {
16714 if (BNXT_VF(bp) && rc == -ENODEV) {
16715 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16716 } else {
16717 netdev_err(bp->dev, "Not enough rings available.\n");
16718 rc = -ENOMEM;
16719 }
16720 goto init_err_pci_clean;
16721 }
16722
16723 bnxt_fw_init_one_p3(bp);
16724
16725 bnxt_init_dflt_coal(bp);
16726
16727 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16728 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16729
16730 rc = bnxt_init_int_mode(bp);
16731 if (rc)
16732 goto init_err_pci_clean;
16733
16734 /* No TC has been set yet and rings may have been trimmed due to
16735 * limited MSIX, so we re-initialize the TX rings per TC.
16736 */
16737 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16738
16739 if (BNXT_PF(bp)) {
16740 if (!bnxt_pf_wq) {
16741 bnxt_pf_wq =
16742 create_singlethread_workqueue("bnxt_pf_wq");
16743 if (!bnxt_pf_wq) {
16744 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16745 rc = -ENOMEM;
16746 goto init_err_pci_clean;
16747 }
16748 }
16749 rc = bnxt_init_tc(bp);
16750 if (rc)
16751 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16752 rc);
16753 }
16754
16755 bnxt_inv_fw_health_reg(bp);
16756 rc = bnxt_dl_register(bp);
16757 if (rc)
16758 goto init_err_dl;
16759
16760 INIT_LIST_HEAD(&bp->usr_fltr_list);
16761
16762 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16763 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16764 if (BNXT_SUPPORTS_QUEUE_API(bp))
16765 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16766 dev->request_ops_lock = true;
16767 dev->netmem_tx = true;
16768
16769 rc = register_netdev(dev);
16770 if (rc)
16771 goto init_err_cleanup;
16772
16773 bnxt_dl_fw_reporters_create(bp);
16774
16775 bnxt_rdma_aux_device_add(bp);
16776
16777 bnxt_print_device_info(bp);
16778
16779 pci_save_state(pdev);
16780
16781 return 0;
16782 init_err_cleanup:
16783 bnxt_rdma_aux_device_uninit(bp);
16784 bnxt_dl_unregister(bp);
16785 init_err_dl:
16786 bnxt_shutdown_tc(bp);
16787 bnxt_clear_int_mode(bp);
16788
16789 init_err_pci_clean:
16790 bnxt_hwrm_func_drv_unrgtr(bp);
16791 bnxt_free_hwrm_resources(bp);
16792 bnxt_hwmon_uninit(bp);
16793 bnxt_ethtool_free(bp);
16794 bnxt_ptp_clear(bp);
16795 kfree(bp->ptp_cfg);
16796 bp->ptp_cfg = NULL;
16797 kfree(bp->fw_health);
16798 bp->fw_health = NULL;
16799 bnxt_cleanup_pci(bp);
16800 bnxt_free_ctx_mem(bp, true);
16801 bnxt_free_crash_dump_mem(bp);
16802 kfree(bp->rss_indir_tbl);
16803 bp->rss_indir_tbl = NULL;
16804
16805 init_err_free:
16806 free_netdev(dev);
16807 return rc;
16808 }
16809
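/* PCI shutdown callback: close the device and, on power-off, arm
 * Wake-on-LAN and put the device into D3hot.
 */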
16810 static void bnxt_shutdown(struct pci_dev *pdev)
16811 {
16812 struct net_device *dev = pci_get_drvdata(pdev);
16813 struct bnxt *bp;
16814
16815 if (!dev)
16816 return;
16817
16818 rtnl_lock();
16819 netdev_lock(dev);
16820 bp = netdev_priv(dev);
16821 if (!bp)
16822 goto shutdown_exit;
16823
16824 if (netif_running(dev))
16825 netif_close(dev);
16826
16827 bnxt_ptp_clear(bp);
16828 bnxt_clear_int_mode(bp);
16829 pci_disable_device(pdev);
16830
16831 if (system_state == SYSTEM_POWER_OFF) {
16832 pci_wake_from_d3(pdev, bp->wol);
16833 pci_set_power_state(pdev, PCI_D3hot);
16834 }
16835
16836 shutdown_exit:
16837 netdev_unlock(dev);
16838 rtnl_unlock();
16839 }
16840
16841 #ifdef CONFIG_PM_SLEEP
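/* System suspend: stop ULP, close the NIC, unregister the driver from
 * firmware and release PCI resources; bnxt_resume() reverses these steps.
 */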
16842 static int bnxt_suspend(struct device *device)
16843 {
16844 struct net_device *dev = dev_get_drvdata(device);
16845 struct bnxt *bp = netdev_priv(dev);
16846 int rc = 0;
16847
16848 bnxt_ulp_stop(bp);
16849
16850 netdev_lock(dev);
16851 if (netif_running(dev)) {
16852 netif_device_detach(dev);
16853 rc = bnxt_close(dev);
16854 }
16855 bnxt_hwrm_func_drv_unrgtr(bp);
16856 bnxt_ptp_clear(bp);
16857 pci_disable_device(bp->pdev);
16858 bnxt_free_ctx_mem(bp, false);
16859 netdev_unlock(dev);
16860 return rc;
16861 }
16862
16863 static int bnxt_resume(struct device *device)
16864 {
16865 struct net_device *dev = dev_get_drvdata(device);
16866 struct bnxt *bp = netdev_priv(dev);
16867 int rc = 0;
16868
16869 netdev_lock(dev);
16870 rc = pci_enable_device(bp->pdev);
16871 if (rc) {
16872 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
16873 rc);
16874 goto resume_exit;
16875 }
16876 pci_set_master(bp->pdev);
16877 if (bnxt_hwrm_ver_get(bp)) {
16878 rc = -ENODEV;
16879 goto resume_exit;
16880 }
16881 rc = bnxt_hwrm_func_reset(bp);
16882 if (rc) {
16883 rc = -EBUSY;
16884 goto resume_exit;
16885 }
16886
16887 rc = bnxt_hwrm_func_qcaps(bp);
16888 if (rc)
16889 goto resume_exit;
16890
16891 bnxt_clear_reservations(bp, true);
16892
16893 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
16894 rc = -ENODEV;
16895 goto resume_exit;
16896 }
16897 if (bp->fw_crash_mem)
16898 bnxt_hwrm_crash_dump_mem_cfg(bp);
16899
16900 if (bnxt_ptp_init(bp)) {
16901 kfree(bp->ptp_cfg);
16902 bp->ptp_cfg = NULL;
16903 }
16904 bnxt_get_wol_settings(bp);
16905 if (netif_running(dev)) {
16906 rc = bnxt_open(dev);
16907 if (!rc)
16908 netif_device_attach(dev);
16909 }
16910
16911 resume_exit:
16912 netdev_unlock(bp->dev);
16913 bnxt_ulp_start(bp, rc);
16914 if (!rc)
16915 bnxt_reenable_sriov(bp);
16916 return rc;
16917 }
16918
16919 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
16920 #define BNXT_PM_OPS (&bnxt_pm_ops)
16921
16922 #else
16923
16924 #define BNXT_PM_OPS NULL
16925
16926 #endif /* CONFIG_PM_SLEEP */
16927
16928 /**
16929 * bnxt_io_error_detected - called when PCI error is detected
16930 * @pdev: Pointer to PCI device
16931 * @state: The current pci connection state
16932 *
16933 * This function is called after a PCI bus error affecting
16934 * this device has been detected.
16935 */
16936 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
16937 pci_channel_state_t state)
16938 {
16939 struct net_device *netdev = pci_get_drvdata(pdev);
16940 struct bnxt *bp = netdev_priv(netdev);
16941 bool abort = false;
16942
16943 netdev_info(netdev, "PCI I/O error detected\n");
16944
16945 bnxt_ulp_stop(bp);
16946
16947 netdev_lock(netdev);
16948 netif_device_detach(netdev);
16949
16950 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
16951 netdev_err(bp->dev, "Firmware reset already in progress\n");
16952 abort = true;
16953 }
16954
16955 if (abort || state == pci_channel_io_perm_failure) {
16956 netdev_unlock(netdev);
16957 return PCI_ERS_RESULT_DISCONNECT;
16958 }
16959
16960 /* Link is not reliable anymore if state is pci_channel_io_frozen
16961 * so we disable bus master to prevent any potential bad DMAs before
16962 * freeing kernel memory.
16963 */
16964 if (state == pci_channel_io_frozen) {
16965 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
16966 bnxt_fw_fatal_close(bp);
16967 }
16968
16969 if (netif_running(netdev))
16970 __bnxt_close_nic(bp, true, true);
16971
16972 if (pci_is_enabled(pdev))
16973 pci_disable_device(pdev);
16974 bnxt_free_ctx_mem(bp, false);
16975 netdev_unlock(netdev);
16976
16977 /* Request a slot reset. */
16978 return PCI_ERS_RESULT_NEED_RESET;
16979 }
16980
16981 /**
16982 * bnxt_io_slot_reset - called after the pci bus has been reset.
16983 * @pdev: Pointer to PCI device
16984 *
16985 * Restart the card from scratch, as if from a cold-boot.
16986 * At this point, the card has experienced a hard reset,
16987 * followed by fixups by BIOS, and has its config space
16988 * set up identically to what it was at cold boot.
16989 */
16990 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
16991 {
16992 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
16993 struct net_device *netdev = pci_get_drvdata(pdev);
16994 struct bnxt *bp = netdev_priv(netdev);
16995 int retry = 0;
16996 int err = 0;
16997 int off;
16998
16999 netdev_info(bp->dev, "PCI Slot Reset\n");
17000
17001 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17002 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17003 msleep(900);
17004
17005 netdev_lock(netdev);
17006
17007 if (pci_enable_device(pdev)) {
17008 dev_err(&pdev->dev,
17009 "Cannot re-enable PCI device after reset.\n");
17010 } else {
17011 pci_set_master(pdev);
17012 /* Upon fatal error, the device's internal logic that latches the
17013 * BAR values is reset and is restored only by rewriting the
17014 * BARs.
17015 *
17016 * Since pci_restore_state() does not re-write a BAR whose value
17017 * matches the previously saved one, the driver writes the BARs
17018 * to 0 to force the restore in case of fatal error.
17019 */
17020 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17021 &bp->state)) {
17022 for (off = PCI_BASE_ADDRESS_0;
17023 off <= PCI_BASE_ADDRESS_5; off += 4)
17024 pci_write_config_dword(bp->pdev, off, 0);
17025 }
17026 pci_restore_state(pdev);
17027 pci_save_state(pdev);
17028
17029 bnxt_inv_fw_health_reg(bp);
17030 bnxt_try_map_fw_health_reg(bp);
17031
17032 /* In some PCIe AER scenarios, firmware may take up to
17033 * 10 seconds to become ready.
17034 */
17035 do {
17036 err = bnxt_try_recover_fw(bp);
17037 if (!err)
17038 break;
17039 retry++;
17040 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17041
17042 if (err) {
17043 dev_err(&pdev->dev, "Firmware not ready\n");
17044 goto reset_exit;
17045 }
17046
17047 err = bnxt_hwrm_func_reset(bp);
17048 if (!err)
17049 result = PCI_ERS_RESULT_RECOVERED;
17050
17051 /* IRQ will be initialized later in bnxt_io_resume */
17052 bnxt_ulp_irq_stop(bp);
17053 bnxt_clear_int_mode(bp);
17054 }
17055
17056 reset_exit:
17057 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17058 bnxt_clear_reservations(bp, true);
17059 netdev_unlock(netdev);
17060
17061 return result;
17062 }
17063
17064 /**
17065 * bnxt_io_resume - called when traffic can start flowing again.
17066 * @pdev: Pointer to PCI device
17067 *
17068 * This callback is called when the error recovery driver tells
17069 * us that it's OK to resume normal operation.
17070 */
17071 static void bnxt_io_resume(struct pci_dev *pdev)
17072 {
17073 struct net_device *netdev = pci_get_drvdata(pdev);
17074 struct bnxt *bp = netdev_priv(netdev);
17075 int err;
17076
17077 netdev_info(bp->dev, "PCI Slot Resume\n");
17078 netdev_lock(netdev);
17079
17080 err = bnxt_hwrm_func_qcaps(bp);
17081 if (!err) {
17082 if (netif_running(netdev)) {
17083 err = bnxt_open(netdev);
17084 } else {
17085 err = bnxt_reserve_rings(bp, true);
17086 if (!err)
17087 err = bnxt_init_int_mode(bp);
17088 }
17089 }
17090
17091 if (!err)
17092 netif_device_attach(netdev);
17093
17094 netdev_unlock(netdev);
17095 bnxt_ulp_start(bp, err);
17096 if (!err)
17097 bnxt_reenable_sriov(bp);
17098 }
17099
17100 static const struct pci_error_handlers bnxt_err_handler = {
17101 .error_detected = bnxt_io_error_detected,
17102 .slot_reset = bnxt_io_slot_reset,
17103 .resume = bnxt_io_resume
17104 };
17105
17106 static struct pci_driver bnxt_pci_driver = {
17107 .name = DRV_MODULE_NAME,
17108 .id_table = bnxt_pci_tbl,
17109 .probe = bnxt_init_one,
17110 .remove = bnxt_remove_one,
17111 .shutdown = bnxt_shutdown,
17112 .driver.pm = BNXT_PM_OPS,
17113 .err_handler = &bnxt_err_handler,
17114 #if defined(CONFIG_BNXT_SRIOV)
17115 .sriov_configure = bnxt_sriov_configure,
17116 #endif
17117 };
17118
17119 static int __init bnxt_init(void)
17120 {
17121 int err;
17122
17123 bnxt_debug_init();
17124 err = pci_register_driver(&bnxt_pci_driver);
17125 if (err) {
17126 bnxt_debug_exit();
17127 return err;
17128 }
17129
17130 return 0;
17131 }
17132
17133 static void __exit bnxt_exit(void)
17134 {
17135 pci_unregister_driver(&bnxt_pci_driver);
17136 if (bnxt_pf_wq)
17137 destroy_workqueue(bnxt_pf_wq);
17138 bnxt_debug_exit();
17139 }
17140
17141 module_init(bnxt_init);
17142 module_exit(bnxt_exit);
17143