1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62 #include <linux/bnxt/ulp.h>
63
64 #include "bnxt.h"
65 #include "bnxt_hwrm.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77 #include "bnxt_gso.h"
78 #include <net/tso.h>
79
80 #define BNXT_TX_TIMEOUT (5 * HZ)
81 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
82 NETIF_MSG_TX_ERR)
83
84 MODULE_IMPORT_NS("NETDEV_INTERNAL");
85 MODULE_LICENSE("GPL");
86 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
87
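/* Non page-mode RX buffers are DMA mapped starting NET_SKB_PAD bytes
 * into the buffer; packet data starts a further NET_IP_ALIGN bytes in.
 */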
88 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
89 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
90
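/* Packets up to this length may be copied into the push buffer and
 * written directly through the TX doorbell ("push" mode) when the
 * ring is empty; see the push path in bnxt_start_xmit().
 */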
91 #define BNXT_TX_PUSH_THRESH 164
92
93 /* indexed by enum board_idx */
94 static const struct {
95 char *name;
96 } board_info[] = {
97 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
98 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
99 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
100 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
101 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
102 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
103 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
104 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
105 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
106 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
108 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
109 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
110 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
111 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
112 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
113 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
114 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
115 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
116 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
117 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
118 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
119 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
120 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
121 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
122 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
123 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
124 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
125 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
126 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
127 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
128 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
129 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
130 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
131 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
132 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
133 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
134 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
135 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
136 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
137 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
138 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
139 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
140 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
141 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
142 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
143 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
145 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
146 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
147 [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
148 };
149
150 static const struct pci_device_id bnxt_pci_tbl[] = {
151 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
154 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
155 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
156 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
157 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
158 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
159 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
160 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
161 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
162 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
163 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
164 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
165 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
166 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
167 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
168 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
169 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
170 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
171 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
173 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
174 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
175 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
178 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
184 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
185 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
186 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
187 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
188 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
189 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
190 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
191 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
192 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
193 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
198 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
199 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
200 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
201 #ifdef CONFIG_BNXT_SRIOV
202 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
203 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
211 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
212 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
215 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
216 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
217 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
218 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
219 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
220 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
221 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
222 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
223 { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
224 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
225 #endif
226 { 0 }
227 };
228
229 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
230
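/* HWRM commands issued by VFs that the PF driver asks the firmware to
 * forward to it (via the vf_req_fwd bitmap) so it can inspect or
 * service them on the VF's behalf.
 */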
231 static const u16 bnxt_vf_req_snif[] = {
232 HWRM_FUNC_CFG,
233 HWRM_FUNC_VF_CFG,
234 HWRM_PORT_PHY_QCFG,
235 HWRM_CFA_L2_FILTER_ALLOC,
236 };
237
238 static const u16 bnxt_async_events_arr[] = {
239 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
241 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
242 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
243 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
244 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
245 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
246 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
247 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
248 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
249 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
250 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
251 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
252 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
253 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
254 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
255 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
256 };
257
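/* Map backing store context types to the trace types used in the
 * DBG_LOG_BUFFER_FLUSH request when collecting firmware traces.
 */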
258 const u16 bnxt_bstore_to_trace[] = {
259 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
260 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
261 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
262 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
263 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
264 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
265 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
266 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
267 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
268 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
269 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
270 [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
271 [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
272 };
273
274 static struct workqueue_struct *bnxt_pf_wq;
275
276 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
277 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
278 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
279
280 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
281 .ports = {
282 .src = 0,
283 .dst = 0,
284 },
285 .addrs = {
286 .v6addrs = {
287 .src = BNXT_IPV6_MASK_NONE,
288 .dst = BNXT_IPV6_MASK_NONE,
289 },
290 },
291 };
292
293 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
294 .ports = {
295 .src = cpu_to_be16(0xffff),
296 .dst = cpu_to_be16(0xffff),
297 },
298 .addrs = {
299 .v6addrs = {
300 .src = BNXT_IPV6_MASK_ALL,
301 .dst = BNXT_IPV6_MASK_ALL,
302 },
303 },
304 };
305
306 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
307 .ports = {
308 .src = cpu_to_be16(0xffff),
309 .dst = cpu_to_be16(0xffff),
310 },
311 .addrs = {
312 .v4addrs = {
313 .src = cpu_to_be32(0xffffffff),
314 .dst = cpu_to_be32(0xffffffff),
315 },
316 },
317 };
318
319 static bool bnxt_vf_pciid(enum board_idx idx)
320 {
321 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
322 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
323 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
324 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
325 idx == NETXTREME_E_P7_VF_HV);
326 }
327
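/* Doorbell helpers: older chips use 32-bit completion ring doorbells,
 * while P5 and newer chips use 64-bit NQ doorbells written with
 * bnxt_writeq().
 */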
328 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
329 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
330
331 #define BNXT_DB_CQ(db, idx) \
332 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
333
334 #define BNXT_DB_NQ_P5(db, idx) \
335 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
336 (db)->doorbell)
337
338 #define BNXT_DB_NQ_P7(db, idx) \
339 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
340 DB_RING_IDX(db, idx), (db)->doorbell)
341
342 #define BNXT_DB_CQ_ARM(db, idx) \
343 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
344
345 #define BNXT_DB_NQ_ARM_P5(db, idx) \
346 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
347 DB_RING_IDX(db, idx), (db)->doorbell)
348
349 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
350 {
351 if (bp->flags & BNXT_FLAG_CHIP_P7)
352 BNXT_DB_NQ_P7(db, idx);
353 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
354 BNXT_DB_NQ_P5(db, idx);
355 else
356 BNXT_DB_CQ(db, idx);
357 }
358
359 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
360 {
361 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
362 BNXT_DB_NQ_ARM_P5(db, idx);
363 else
364 BNXT_DB_CQ_ARM(db, idx);
365 }
366
367 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
368 {
369 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
370 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
371 DB_RING_IDX(db, idx), db->doorbell);
372 else
373 BNXT_DB_CQ(db, idx);
374 }
375
376 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
377 {
378 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
379 return;
380
381 if (BNXT_PF(bp))
382 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
383 else
384 schedule_delayed_work(&bp->fw_reset_task, delay);
385 }
386
387 static void __bnxt_queue_sp_work(struct bnxt *bp)
388 {
389 if (BNXT_PF(bp))
390 queue_work(bnxt_pf_wq, &bp->sp_task);
391 else
392 schedule_work(&bp->sp_task);
393 }
394
395 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
396 {
397 set_bit(event, &bp->sp_event);
398 __bnxt_queue_sp_work(bp);
399 }
400
401 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
402 {
403 if (!rxr->bnapi->in_reset) {
404 rxr->bnapi->in_reset = true;
405 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
406 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
407 else
408 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
409 __bnxt_queue_sp_work(bp);
410 }
411 rxr->rx_next_cons = 0xffff;
412 }
413
414 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
415 u16 curr)
416 {
417 struct bnxt_napi *bnapi = txr->bnapi;
418
419 if (bnapi->tx_fault)
420 return;
421
422 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
423 txr->txq_index, txr->tx_hw_cons,
424 txr->tx_cons, txr->tx_prod, curr);
425 WARN_ON_ONCE(1);
426 bnapi->tx_fault = 1;
427 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
428 }
429
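/* TX BD length hint flags, indexed by packet length in 512-byte units
 * (see the "length >>= 9" lookup in bnxt_start_xmit()).
 */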
430 const u16 bnxt_lhint_arr[] = {
431 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
432 TX_BD_FLAGS_LHINT_512_TO_1023,
433 TX_BD_FLAGS_LHINT_1024_TO_2047,
434 TX_BD_FLAGS_LHINT_1024_TO_2047,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
449 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
450 };
451
452 u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
453 {
454 struct metadata_dst *md_dst = skb_metadata_dst(skb);
455
456 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
457 return 0;
458
459 return md_dst->u.port_info.port_id;
460 }
461
462 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
463 u16 prod)
464 {
465 /* Sync BD data before updating doorbell */
466 wmb();
467 bnxt_db_write(bp, &txr->tx_db, prod);
468 txr->kick_pending = 0;
469 }
470
471 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
472 {
473 struct bnxt *bp = netdev_priv(dev);
474 struct tx_bd *txbd, *txbd0;
475 struct tx_bd_ext *txbd1;
476 struct netdev_queue *txq;
477 int i;
478 dma_addr_t mapping;
479 unsigned int length, pad = 0;
480 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
481 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
482 struct pci_dev *pdev = bp->pdev;
483 u16 prod, last_frag, txts_prod;
484 struct bnxt_tx_ring_info *txr;
485 struct bnxt_sw_tx_bd *tx_buf;
486 __le32 lflags = 0;
487 skb_frag_t *frag;
488
489 i = skb_get_queue_mapping(skb);
490 if (unlikely(i >= bp->tx_nr_rings)) {
491 dev_kfree_skb_any(skb);
492 dev_core_stats_tx_dropped_inc(dev);
493 return NETDEV_TX_OK;
494 }
495
496 txq = netdev_get_tx_queue(dev, i);
497 txr = &bp->tx_ring[bp->tx_ring_map[i]];
498 prod = txr->tx_prod;
499
500 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
501 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
502 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
503 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
504 if (skb_linearize(skb)) {
505 dev_kfree_skb_any(skb);
506 dev_core_stats_tx_dropped_inc(dev);
507 return NETDEV_TX_OK;
508 }
509 }
510 #endif
511 if (skb_is_gso(skb) &&
512 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) &&
513 !(bp->flags & BNXT_FLAG_UDP_GSO_CAP))
514 return bnxt_sw_udp_gso_xmit(bp, txr, txq, skb);
515
516 free_size = bnxt_tx_avail(bp, txr);
517 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
518 /* We must have raced with NAPI cleanup */
519 if (net_ratelimit() && txr->kick_pending)
520 netif_warn(bp, tx_err, dev,
521 "bnxt: ring busy w/ flush pending!\n");
522 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
523 bp->tx_wake_thresh))
524 return NETDEV_TX_BUSY;
525 }
526
527 length = skb->len;
528 len = skb_headlen(skb);
529 last_frag = skb_shinfo(skb)->nr_frags;
530
531 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
532
533 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
534 tx_buf->skb = skb;
535 tx_buf->nr_frags = last_frag;
536
537 vlan_tag_flags = 0;
538 cfa_action = bnxt_xmit_get_cfa_action(skb);
539 if (skb_vlan_tag_present(skb)) {
540 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
541 skb_vlan_tag_get(skb);
542 /* Currently supports 8021Q, 8021AD vlan offloads
543 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
544 */
545 if (skb->vlan_proto == htons(ETH_P_8021Q))
546 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
547 }
548
549 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
550 ptp->tx_tstamp_en) {
551 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
552 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
553 tx_buf->is_ts_pkt = 1;
554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
555 } else if (!skb_is_gso(skb)) {
556 u16 seq_id, hdr_off;
557
558 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
559 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
560 if (vlan_tag_flags)
561 hdr_off += VLAN_HLEN;
562 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
563 tx_buf->is_ts_pkt = 1;
564 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
565
566 ptp->txts_req[txts_prod].tx_seqid = seq_id;
567 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
568 tx_buf->txts_prod = txts_prod;
569 }
570 }
571 }
572 if (unlikely(skb->no_fcs))
573 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
574
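	/* Small packet on an otherwise empty ring with no special lflags:
	 * copy it into the push buffer and write it through the doorbell
	 * instead of DMA mapping it.
	 */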
575 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
576 skb_frags_readable(skb) && !lflags) {
577 struct tx_push_buffer *tx_push_buf = txr->tx_push;
578 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
579 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
580 void __iomem *db = txr->tx_db.doorbell;
581 void *pdata = tx_push_buf->data;
582 u64 *end;
583 int j, push_len;
584
585 /* Set COAL_NOW to be ready quickly for the next push */
586 tx_push->tx_bd_len_flags_type =
587 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
588 TX_BD_TYPE_LONG_TX_BD |
589 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
590 TX_BD_FLAGS_COAL_NOW |
591 TX_BD_FLAGS_PACKET_END |
592 TX_BD_CNT(2));
593
594 if (skb->ip_summed == CHECKSUM_PARTIAL)
595 tx_push1->tx_bd_hsize_lflags =
596 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
597 else
598 tx_push1->tx_bd_hsize_lflags = 0;
599
600 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
601 tx_push1->tx_bd_cfa_action =
602 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
603
604 end = pdata + length;
605 end = PTR_ALIGN(end, 8) - 1;
606 *end = 0;
607
608 skb_copy_from_linear_data(skb, pdata, len);
609 pdata += len;
610 for (j = 0; j < last_frag; j++) {
611 void *fptr;
612
613 frag = &skb_shinfo(skb)->frags[j];
614 fptr = skb_frag_address_safe(frag);
615 if (!fptr)
616 goto normal_tx;
617
618 memcpy(pdata, fptr, skb_frag_size(frag));
619 pdata += skb_frag_size(frag);
620 }
621
622 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
623 txbd->tx_bd_haddr = txr->data_mapping;
624 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
625 prod = NEXT_TX(prod);
626 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
627 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
628 memcpy(txbd, tx_push1, sizeof(*txbd));
629 prod = NEXT_TX(prod);
630 tx_push->doorbell =
631 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
632 DB_RING_IDX(&txr->tx_db, prod));
633 WRITE_ONCE(txr->tx_prod, prod);
634
635 tx_buf->is_push = 1;
636 netdev_tx_sent_queue(txq, skb->len);
637 wmb(); /* Sync is_push and byte queue before pushing data */
638
639 push_len = (length + sizeof(*tx_push) + 7) / 8;
640 if (push_len > 16) {
641 __iowrite64_copy(db, tx_push_buf, 16);
642 __iowrite32_copy(db + 4, tx_push_buf + 1,
643 (push_len - 16) << 1);
644 } else {
645 __iowrite64_copy(db, tx_push_buf, push_len);
646 }
647
648 goto tx_done;
649 }
650
651 normal_tx:
652 if (length < BNXT_MIN_PKT_SIZE) {
653 pad = BNXT_MIN_PKT_SIZE - length;
654 if (skb_pad(skb, pad))
655 /* SKB already freed. */
656 goto tx_kick_pending;
657 length = BNXT_MIN_PKT_SIZE;
658 }
659
660 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
661
662 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
663 goto tx_free;
664
665 dma_unmap_addr_set(tx_buf, mapping, mapping);
666 dma_unmap_len_set(tx_buf, len, len);
667 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
668 TX_BD_CNT(last_frag + 2);
669
670 txbd->tx_bd_haddr = cpu_to_le64(mapping);
671 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
672
673 prod = NEXT_TX(prod);
674 txbd1 = bnxt_init_ext_bd(bp, txr, prod, lflags, vlan_tag_flags,
675 cfa_action);
676
677 if (skb_is_gso(skb)) {
678 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
679 u32 hdr_len;
680
681 if (skb->encapsulation) {
682 if (udp_gso)
683 hdr_len = skb_inner_transport_offset(skb) +
684 sizeof(struct udphdr);
685 else
686 hdr_len = skb_inner_tcp_all_headers(skb);
687 } else if (udp_gso) {
688 hdr_len = skb_transport_offset(skb) +
689 sizeof(struct udphdr);
690 } else {
691 hdr_len = skb_tcp_all_headers(skb);
692 }
693
694 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
695 TX_BD_FLAGS_T_IPID |
696 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
697 length = skb_shinfo(skb)->gso_size;
698 txbd1->tx_bd_mss = cpu_to_le32(length);
699 length += hdr_len;
700 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
701 txbd1->tx_bd_hsize_lflags |=
702 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
703 }
704
705 length >>= 9;
706 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
707 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
708 skb->len);
709 i = 0;
710 goto tx_dma_error;
711 }
712 flags |= bnxt_lhint_arr[length];
713 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
714
715 txbd0 = txbd;
716 for (i = 0; i < last_frag; i++) {
717 frag = &skb_shinfo(skb)->frags[i];
718 prod = NEXT_TX(prod);
719 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
720
721 len = skb_frag_size(frag);
722 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
723 DMA_TO_DEVICE);
724
725 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
726 goto tx_dma_error;
727
728 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
729 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
730 mapping, mapping);
731 dma_unmap_len_set(tx_buf, len, len);
732
733 txbd->tx_bd_haddr = cpu_to_le64(mapping);
734
735 flags = len << TX_BD_LEN_SHIFT;
736 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
737 }
738
739 flags &= ~TX_BD_LEN;
740 txbd->tx_bd_len_flags_type =
741 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
742 TX_BD_FLAGS_PACKET_END);
743
744 netdev_tx_sent_queue(txq, skb->len);
745
746 skb_tx_timestamp(skb);
747
748 prod = NEXT_TX(prod);
749 WRITE_ONCE(txr->tx_prod, prod);
750
751 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
752 bnxt_txr_db_kick(bp, txr, prod);
753 } else {
754 if (free_size >= bp->tx_wake_thresh)
755 txbd0->tx_bd_len_flags_type |=
756 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
757 txr->kick_pending = 1;
758 }
759
760 tx_done:
761
762 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
763 if (netdev_xmit_more() && !tx_buf->is_push) {
764 txbd0->tx_bd_len_flags_type &=
765 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
766 bnxt_txr_db_kick(bp, txr, prod);
767 }
768
769 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
770 bp->tx_wake_thresh);
771 }
772 return NETDEV_TX_OK;
773
774 tx_dma_error:
775 last_frag = i;
776
777 /* start back at beginning and unmap skb */
778 prod = txr->tx_prod;
779 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
780 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
781 skb_headlen(skb), DMA_TO_DEVICE);
782 prod = NEXT_TX(prod);
783
784 /* unmap remaining mapped pages */
785 for (i = 0; i < last_frag; i++) {
786 prod = NEXT_TX(prod);
787 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
788 frag = &skb_shinfo(skb)->frags[i];
789 netmem_dma_unmap_page_attrs(&pdev->dev,
790 dma_unmap_addr(tx_buf, mapping),
791 skb_frag_size(frag),
792 DMA_TO_DEVICE, 0);
793 }
794
795 tx_free:
796 dev_kfree_skb_any(skb);
797 tx_kick_pending:
798 if (BNXT_TX_PTP_IS_SET(lflags)) {
799 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
800 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
801 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
802 /* set SKB to err so PTP worker will clean up */
803 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
804 }
805 if (txr->kick_pending)
806 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
807 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
808 dev_core_stats_tx_dropped_inc(dev);
809 return NETDEV_TX_OK;
810 }
811
812 /* Returns true if some TX packets remain unprocessed. */
813 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
814 int budget)
815 {
816 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
817 struct pci_dev *pdev = bp->pdev;
818 u16 hw_cons = txr->tx_hw_cons;
819 unsigned int tx_bytes = 0;
820 u16 cons = txr->tx_cons;
821 unsigned int dma_len;
822 dma_addr_t dma_addr;
823 int tx_pkts = 0;
824 bool rc = false;
825
826 while (RING_TX(bp, cons) != hw_cons) {
827 struct bnxt_sw_tx_bd *tx_buf, *head_buf;
828 struct sk_buff *skb;
829 bool is_ts_pkt;
830 int j, last;
831
832 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
833 head_buf = tx_buf;
834 skb = tx_buf->skb;
835
836 if (unlikely(!skb)) {
837 bnxt_sched_reset_txr(bp, txr, cons);
838 return rc;
839 }
840
841 is_ts_pkt = tx_buf->is_ts_pkt;
842 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
843 rc = true;
844 break;
845 }
846
847 cons = NEXT_TX(cons);
848 tx_pkts++;
849 tx_bytes += skb->len;
850 tx_buf->skb = NULL;
851 tx_buf->is_ts_pkt = 0;
852
853 if (tx_buf->is_push) {
854 tx_buf->is_push = 0;
855 goto next_tx_int;
856 }
857
858 if (dma_unmap_len(tx_buf, len)) {
859 dma_addr = dma_unmap_addr(tx_buf, mapping);
860 dma_len = dma_unmap_len(tx_buf, len);
861
862 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
863 DMA_TO_DEVICE);
864 }
865
866 last = tx_buf->nr_frags;
867
868 for (j = 0; j < last; j++) {
869 cons = NEXT_TX(cons);
870 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
871 if (dma_unmap_len(tx_buf, len)) {
872 dma_addr = dma_unmap_addr(tx_buf, mapping);
873 dma_len = dma_unmap_len(tx_buf, len);
874
875 netmem_dma_unmap_page_attrs(&pdev->dev,
876 dma_addr, dma_len,
877 DMA_TO_DEVICE, 0);
878 }
879 }
880
881 if (unlikely(head_buf->is_sw_gso)) {
882 u16 inline_cons = txr->tx_inline_cons + 1;
883
884 WRITE_ONCE(txr->tx_inline_cons, inline_cons);
885 if (head_buf->is_sw_gso == BNXT_SW_GSO_LAST) {
886 tso_dma_map_complete(&pdev->dev,
887 &head_buf->sw_gso_cstate);
888 } else {
889 tx_pkts--;
890 tx_bytes -= skb->len;
891 skb = NULL;
892 }
893 head_buf->is_sw_gso = 0;
894 }
895
896 if (unlikely(is_ts_pkt)) {
897 if (BNXT_CHIP_P5(bp)) {
898 /* PTP worker takes ownership of the skb */
899 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
900 skb = NULL;
901 }
902 }
903
904 next_tx_int:
905 cons = NEXT_TX(cons);
906
907 napi_consume_skb(skb, budget);
908 }
909
910 WRITE_ONCE(txr->tx_cons, cons);
911
912 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
913 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
914 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
915
916 return rc;
917 }
918
919 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
920 {
921 struct bnxt_tx_ring_info *txr;
922 bool more = false;
923 int i;
924
925 bnxt_for_each_napi_tx(i, bnapi, txr) {
926 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
927 more |= __bnxt_tx_int(bp, txr, budget);
928 }
929 if (!more)
930 bnapi->events &= ~BNXT_TX_CMP_EVENT;
931 }
932
933 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
934 {
935 return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
936 }
937
938 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
939 struct bnxt_rx_ring_info *rxr,
940 unsigned int *offset,
941 gfp_t gfp)
942 {
943 struct page *page;
944
945 if (rxr->rx_page_size < PAGE_SIZE) {
946 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
947 rxr->rx_page_size);
948 } else {
949 page = page_pool_dev_alloc_pages(rxr->page_pool);
950 *offset = 0;
951 }
952 if (!page)
953 return NULL;
954
955 *mapping = page_pool_get_dma_addr(page) + *offset;
956 return page;
957 }
958
959 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
960 struct bnxt_rx_ring_info *rxr,
961 unsigned int *offset,
962 gfp_t gfp)
963 {
964 netmem_ref netmem;
965
966 if (rxr->rx_page_size < PAGE_SIZE) {
967 netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
968 rxr->rx_page_size, gfp);
969 } else {
970 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
971 *offset = 0;
972 }
973 if (!netmem)
974 return 0;
975
976 *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
977 return netmem;
978 }
979
980 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
981 struct bnxt_rx_ring_info *rxr,
982 gfp_t gfp)
983 {
984 unsigned int offset;
985 struct page *page;
986
987 page = page_pool_alloc_frag(rxr->head_pool, &offset,
988 bp->rx_buf_size, gfp);
989 if (!page)
990 return NULL;
991
992 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
993 return page_address(page) + offset;
994 }
995
996 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
997 u16 prod, gfp_t gfp)
998 {
999 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1000 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1001 dma_addr_t mapping;
1002
1003 if (BNXT_RX_PAGE_MODE(bp)) {
1004 unsigned int offset;
1005 struct page *page =
1006 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
1007
1008 if (!page)
1009 return -ENOMEM;
1010
1011 mapping += bp->rx_dma_offset;
1012 rx_buf->data = page;
1013 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
1014 } else {
1015 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
1016
1017 if (!data)
1018 return -ENOMEM;
1019
1020 rx_buf->data = data;
1021 rx_buf->data_ptr = data + bp->rx_offset;
1022 }
1023 rx_buf->mapping = mapping;
1024
1025 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1026 return 0;
1027 }
1028
1029 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1030 {
1031 u16 prod = rxr->rx_prod;
1032 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1033 struct bnxt *bp = rxr->bnapi->bp;
1034 struct rx_bd *cons_bd, *prod_bd;
1035
1036 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1037 cons_rx_buf = &rxr->rx_buf_ring[cons];
1038
1039 prod_rx_buf->data = data;
1040 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1041
1042 prod_rx_buf->mapping = cons_rx_buf->mapping;
1043
1044 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1045 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1046
1047 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1048 }
1049
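/* Find the next free slot in the aggregation buffer bitmap, wrapping
 * back to the start of the bitmap if needed.
 */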
1050 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1051 {
1052 u16 next, max = rxr->rx_agg_bmap_size;
1053
1054 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1055 if (next >= max)
1056 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1057 return next;
1058 }
1059
1060 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1061 u16 prod, gfp_t gfp)
1062 {
1063 struct rx_bd *rxbd =
1064 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1065 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1066 u16 sw_prod = rxr->rx_sw_agg_prod;
1067 unsigned int offset = 0;
1068 dma_addr_t mapping;
1069 netmem_ref netmem;
1070
1071 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1072 if (!netmem)
1073 return -ENOMEM;
1074
1075 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1076 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1077
1078 __set_bit(sw_prod, rxr->rx_agg_bmap);
1079 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1080 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1081
1082 rx_agg_buf->netmem = netmem;
1083 rx_agg_buf->offset = offset;
1084 rx_agg_buf->mapping = mapping;
1085 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1086 rxbd->rx_bd_opaque = sw_prod;
1087 return 0;
1088 }
1089
1090 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1091 struct bnxt_cp_ring_info *cpr,
1092 u16 cp_cons, u16 curr)
1093 {
1094 struct rx_agg_cmp *agg;
1095
1096 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1097 agg = (struct rx_agg_cmp *)
1098 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1099 return agg;
1100 }
1101
1102 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1103 struct bnxt_rx_ring_info *rxr,
1104 u16 agg_id, u16 curr)
1105 {
1106 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1107
1108 return &tpa_info->agg_arr[curr];
1109 }
1110
1111 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1112 u16 start, u32 agg_bufs, bool tpa)
1113 {
1114 struct bnxt_napi *bnapi = cpr->bnapi;
1115 struct bnxt *bp = bnapi->bp;
1116 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1117 u16 prod = rxr->rx_agg_prod;
1118 u16 sw_prod = rxr->rx_sw_agg_prod;
1119 bool p5_tpa = false;
1120 u32 i;
1121
1122 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1123 p5_tpa = true;
1124
1125 for (i = 0; i < agg_bufs; i++) {
1126 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1127 struct rx_agg_cmp *agg;
1128 struct rx_bd *prod_bd;
1129 netmem_ref netmem;
1130 u16 cons;
1131
1132 if (p5_tpa)
1133 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1134 else
1135 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1136 cons = agg->rx_agg_cmp_opaque;
1137 __clear_bit(cons, rxr->rx_agg_bmap);
1138
1139 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1140 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1141
1142 __set_bit(sw_prod, rxr->rx_agg_bmap);
1143 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1144 cons_rx_buf = &rxr->rx_agg_ring[cons];
1145
1146 /* It is possible for sw_prod to be equal to cons, so
1147 * set cons_rx_buf->netmem to 0 first.
1148 */
1149 netmem = cons_rx_buf->netmem;
1150 cons_rx_buf->netmem = 0;
1151 prod_rx_buf->netmem = netmem;
1152 prod_rx_buf->offset = cons_rx_buf->offset;
1153
1154 prod_rx_buf->mapping = cons_rx_buf->mapping;
1155
1156 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1157
1158 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1159 prod_bd->rx_bd_opaque = sw_prod;
1160
1161 prod = NEXT_RX_AGG(prod);
1162 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1163 }
1164 rxr->rx_agg_prod = prod;
1165 rxr->rx_sw_agg_prod = sw_prod;
1166 }
1167
1168 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1169 struct bnxt_rx_ring_info *rxr,
1170 u16 cons, void *data, u8 *data_ptr,
1171 dma_addr_t dma_addr,
1172 unsigned int offset_and_len)
1173 {
1174 unsigned int len = offset_and_len & 0xffff;
1175 struct page *page = data;
1176 u16 prod = rxr->rx_prod;
1177 struct sk_buff *skb;
1178 int err;
1179
1180 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1181 if (unlikely(err)) {
1182 bnxt_reuse_rx_data(rxr, cons, data);
1183 return NULL;
1184 }
1185 dma_addr -= bp->rx_dma_offset;
1186 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1187 bp->rx_dir);
1188 skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1189 if (!skb) {
1190 page_pool_recycle_direct(rxr->page_pool, page);
1191 return NULL;
1192 }
1193 skb_mark_for_recycle(skb);
1194 skb_reserve(skb, bp->rx_offset);
1195 __skb_put(skb, len);
1196
1197 return skb;
1198 }
1199
1200 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1201 struct bnxt_rx_ring_info *rxr,
1202 u16 cons, void *data, u8 *data_ptr,
1203 dma_addr_t dma_addr,
1204 unsigned int offset_and_len)
1205 {
1206 unsigned int payload = offset_and_len >> 16;
1207 unsigned int len = offset_and_len & 0xffff;
1208 skb_frag_t *frag;
1209 struct page *page = data;
1210 u16 prod = rxr->rx_prod;
1211 struct sk_buff *skb;
1212 int off, err;
1213
1214 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1215 if (unlikely(err)) {
1216 bnxt_reuse_rx_data(rxr, cons, data);
1217 return NULL;
1218 }
1219 dma_addr -= bp->rx_dma_offset;
1220 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1221 bp->rx_dir);
1222
1223 if (unlikely(!payload))
1224 payload = eth_get_headlen(bp->dev, data_ptr, len);
1225
1226 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1227 if (!skb) {
1228 page_pool_recycle_direct(rxr->page_pool, page);
1229 return NULL;
1230 }
1231
1232 skb_mark_for_recycle(skb);
1233 off = (void *)data_ptr - page_address(page);
1234 skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1235 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1236 payload + NET_IP_ALIGN);
1237
1238 frag = &skb_shinfo(skb)->frags[0];
1239 skb_frag_size_sub(frag, payload);
1240 skb_frag_off_add(frag, payload);
1241 skb->data_len -= payload;
1242 skb->tail += payload;
1243
1244 return skb;
1245 }
1246
1247 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1248 struct bnxt_rx_ring_info *rxr, u16 cons,
1249 void *data, u8 *data_ptr,
1250 dma_addr_t dma_addr,
1251 unsigned int offset_and_len)
1252 {
1253 u16 prod = rxr->rx_prod;
1254 struct sk_buff *skb;
1255 int err;
1256
1257 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1258 if (unlikely(err)) {
1259 bnxt_reuse_rx_data(rxr, cons, data);
1260 return NULL;
1261 }
1262
1263 skb = napi_build_skb(data, bp->rx_buf_size);
1264 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1265 bp->rx_dir);
1266 if (!skb) {
1267 page_pool_free_va(rxr->head_pool, data, true);
1268 return NULL;
1269 }
1270
1271 skb_mark_for_recycle(skb);
1272 skb_reserve(skb, bp->rx_offset);
1273 skb_put(skb, offset_and_len & 0xffff);
1274 return skb;
1275 }
1276
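/* Attach the aggregation buffers of one RX packet to an skb or an
 * xdp_buff and refill the aggregation ring.  Returns the total frag
 * length added, or 0 if a refill allocation failed and the remaining
 * aggregation buffers were reused.
 */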
1277 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1278 struct bnxt_cp_ring_info *cpr,
1279 u16 idx, u32 agg_bufs, bool tpa,
1280 struct sk_buff *skb,
1281 struct xdp_buff *xdp)
1282 {
1283 struct bnxt_napi *bnapi = cpr->bnapi;
1284 struct skb_shared_info *shinfo;
1285 struct bnxt_rx_ring_info *rxr;
1286 u32 i, total_frag_len = 0;
1287 bool p5_tpa = false;
1288 u16 prod;
1289
1290 rxr = bnapi->rx_ring;
1291 prod = rxr->rx_agg_prod;
1292
1293 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1294 p5_tpa = true;
1295
1296 if (skb)
1297 shinfo = skb_shinfo(skb);
1298 else
1299 shinfo = xdp_get_shared_info_from_buff(xdp);
1300
1301 for (i = 0; i < agg_bufs; i++) {
1302 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1303 struct rx_agg_cmp *agg;
1304 u16 cons, frag_len;
1305 netmem_ref netmem;
1306
1307 if (p5_tpa)
1308 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1309 else
1310 agg = bnxt_get_agg(bp, cpr, idx, i);
1311 cons = agg->rx_agg_cmp_opaque;
1312 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1313 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1314
1315 cons_rx_buf = &rxr->rx_agg_ring[cons];
1316 if (skb) {
1317 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1318 cons_rx_buf->offset,
1319 frag_len, rxr->rx_page_size);
1320 } else {
1321 skb_frag_t *frag = &shinfo->frags[i];
1322
1323 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1324 cons_rx_buf->offset,
1325 frag_len);
1326 shinfo->nr_frags = i + 1;
1327 }
1328 __clear_bit(cons, rxr->rx_agg_bmap);
1329
1330 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1331 * a sw_prod index that equals the cons index, so we
1332 * need to clear the cons entry now.
1333 */
1334 netmem = cons_rx_buf->netmem;
1335 cons_rx_buf->netmem = 0;
1336
1337 if (xdp && netmem_is_pfmemalloc(netmem))
1338 xdp_buff_set_frag_pfmemalloc(xdp);
1339
1340 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1341 if (skb) {
1342 skb->len -= frag_len;
1343 skb->data_len -= frag_len;
1344 skb->truesize -= rxr->rx_page_size;
1345 }
1346
1347 --shinfo->nr_frags;
1348 cons_rx_buf->netmem = netmem;
1349
1350 /* Update prod since possibly some netmems have been
1351 * allocated already.
1352 */
1353 rxr->rx_agg_prod = prod;
1354 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1355 return 0;
1356 }
1357
1358 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1359 rxr->rx_page_size);
1360
1361 total_frag_len += frag_len;
1362 prod = NEXT_RX_AGG(prod);
1363 }
1364 rxr->rx_agg_prod = prod;
1365 return total_frag_len;
1366 }
1367
1368 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1369 struct bnxt_cp_ring_info *cpr,
1370 struct sk_buff *skb, u16 idx,
1371 u32 agg_bufs, bool tpa)
1372 {
1373 u32 total_frag_len = 0;
1374
1375 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1376 skb, NULL);
1377 if (!total_frag_len) {
1378 skb_mark_for_recycle(skb);
1379 dev_kfree_skb(skb);
1380 return NULL;
1381 }
1382
1383 return skb;
1384 }
1385
1386 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1387 struct bnxt_cp_ring_info *cpr,
1388 struct xdp_buff *xdp, u16 idx,
1389 u32 agg_bufs, bool tpa)
1390 {
1391 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1392 u32 total_frag_len = 0;
1393
1394 if (!xdp_buff_has_frags(xdp))
1395 shinfo->nr_frags = 0;
1396
1397 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1398 NULL, xdp);
1399 if (total_frag_len) {
1400 xdp_buff_set_frags_flag(xdp);
1401 shinfo->nr_frags = agg_bufs;
1402 shinfo->xdp_frags_size = total_frag_len;
1403 }
1404 return total_frag_len;
1405 }
1406
1407 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1408 u8 agg_bufs, u32 *raw_cons)
1409 {
1410 u16 last;
1411 struct rx_agg_cmp *agg;
1412
1413 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1414 last = RING_CMP(*raw_cons);
1415 agg = (struct rx_agg_cmp *)
1416 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1417 return RX_AGG_CMP_VALID(agg, *raw_cons);
1418 }
1419
1420 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1421 unsigned int len,
1422 dma_addr_t mapping)
1423 {
1424 struct bnxt *bp = bnapi->bp;
1425 struct pci_dev *pdev = bp->pdev;
1426 struct sk_buff *skb;
1427
1428 skb = napi_alloc_skb(&bnapi->napi, len);
1429 if (!skb)
1430 return NULL;
1431
1432 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1433 bp->rx_dir);
1434
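	/* Copy from NET_IP_ALIGN bytes before the packet so the headers in
	 * the new skb keep the same alignment as the source buffer.
	 */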
1435 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1436 len + NET_IP_ALIGN);
1437
1438 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1439 bp->rx_dir);
1440
1441 skb_put(skb, len);
1442
1443 return skb;
1444 }
1445
1446 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1447 unsigned int len,
1448 dma_addr_t mapping)
1449 {
1450 return bnxt_copy_data(bnapi, data, len, mapping);
1451 }
1452
1453 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1454 struct xdp_buff *xdp,
1455 unsigned int len,
1456 dma_addr_t mapping)
1457 {
1458 unsigned int metasize = 0;
1459 u8 *data = xdp->data;
1460 struct sk_buff *skb;
1461
1462 len = xdp->data_end - xdp->data_meta;
1463 metasize = xdp->data - xdp->data_meta;
1464 data = xdp->data_meta;
1465
1466 skb = bnxt_copy_data(bnapi, data, len, mapping);
1467 if (!skb)
1468 return skb;
1469
1470 if (metasize) {
1471 skb_metadata_set(skb, metasize);
1472 __skb_pull(skb, metasize);
1473 }
1474
1475 return skb;
1476 }
1477
1478 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1479 u32 *raw_cons, void *cmp)
1480 {
1481 struct rx_cmp *rxcmp = cmp;
1482 u32 tmp_raw_cons = *raw_cons;
1483 u8 cmp_type, agg_bufs = 0;
1484
1485 cmp_type = RX_CMP_TYPE(rxcmp);
1486
1487 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1488 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1489 RX_CMP_AGG_BUFS) >>
1490 RX_CMP_AGG_BUFS_SHIFT;
1491 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1492 struct rx_tpa_end_cmp *tpa_end = cmp;
1493
1494 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1495 return 0;
1496
1497 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1498 }
1499
1500 if (agg_bufs) {
1501 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1502 return -EBUSY;
1503 }
1504 *raw_cons = tmp_raw_cons;
1505 return 0;
1506 }
1507
1508 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1509 {
1510 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1511 u16 idx = agg_id & MAX_TPA_P5_MASK;
1512
1513 if (test_bit(idx, map->agg_idx_bmap)) {
1514 idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1515 if (idx >= MAX_TPA_P5)
1516 return INVALID_HW_RING_ID;
1517 }
1518 __set_bit(idx, map->agg_idx_bmap);
1519 map->agg_id_tbl[agg_id] = idx;
1520 return idx;
1521 }
1522
1523 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1524 {
1525 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1526
1527 __clear_bit(idx, map->agg_idx_bmap);
1528 }
1529
1530 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1531 {
1532 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1533
1534 return map->agg_id_tbl[agg_id];
1535 }
1536
1537 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1538 struct rx_tpa_start_cmp *tpa_start,
1539 struct rx_tpa_start_cmp_ext *tpa_start1)
1540 {
1541 tpa_info->cfa_code_valid = 1;
1542 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1543 tpa_info->vlan_valid = 0;
1544 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1545 tpa_info->vlan_valid = 1;
1546 tpa_info->metadata =
1547 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1548 }
1549 }
1550
1551 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1552 struct rx_tpa_start_cmp *tpa_start,
1553 struct rx_tpa_start_cmp_ext *tpa_start1)
1554 {
1555 tpa_info->vlan_valid = 0;
1556 if (TPA_START_VLAN_VALID(tpa_start)) {
1557 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1558 u32 vlan_proto = ETH_P_8021Q;
1559
1560 tpa_info->vlan_valid = 1;
1561 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1562 vlan_proto = ETH_P_8021AD;
1563 tpa_info->metadata = vlan_proto << 16 |
1564 TPA_START_METADATA0_TCI(tpa_start1);
1565 }
1566 }
1567
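/* Handle a TPA_START completion: save the current RX buffer in the
 * per-aggregation tpa_info so the aggregated skb can be completed when
 * the corresponding TPA_END completion arrives.
 */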
1568 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1569 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1570 struct rx_tpa_start_cmp_ext *tpa_start1)
1571 {
1572 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1573 struct bnxt_tpa_info *tpa_info;
1574 u16 cons, prod, agg_id;
1575 struct rx_bd *prod_bd;
1576 dma_addr_t mapping;
1577
1578 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1579 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1580 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1581 if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1582 netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1583 rxr->bnapi->index,
1584 TPA_START_AGG_ID_P5(tpa_start));
1585 bnxt_sched_reset_rxr(bp, rxr);
1586 return;
1587 }
1588 } else {
1589 agg_id = TPA_START_AGG_ID(tpa_start);
1590 }
1591 cons = tpa_start->rx_tpa_start_cmp_opaque;
1592 prod = rxr->rx_prod;
1593 cons_rx_buf = &rxr->rx_buf_ring[cons];
1594 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1595 tpa_info = &rxr->rx_tpa[agg_id];
1596
1597 if (unlikely(cons != rxr->rx_next_cons ||
1598 TPA_START_ERROR(tpa_start))) {
1599 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1600 cons, rxr->rx_next_cons,
1601 TPA_START_ERROR_CODE(tpa_start1));
1602 bnxt_sched_reset_rxr(bp, rxr);
1603 return;
1604 }
1605 prod_rx_buf->data = tpa_info->data;
1606 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1607
1608 mapping = tpa_info->mapping;
1609 prod_rx_buf->mapping = mapping;
1610
1611 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1612
1613 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1614
1615 tpa_info->data = cons_rx_buf->data;
1616 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1617 cons_rx_buf->data = NULL;
1618 tpa_info->mapping = cons_rx_buf->mapping;
1619
1620 tpa_info->len =
1621 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1622 RX_TPA_START_CMP_LEN_SHIFT;
1623 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1624 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1625 tpa_info->gso_type = SKB_GSO_TCPV4;
1626 if (TPA_START_IS_IPV6(tpa_start1))
1627 tpa_info->gso_type = SKB_GSO_TCPV6;
1628 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1629 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1630 TPA_START_HASH_TYPE(tpa_start) == 3)
1631 tpa_info->gso_type = SKB_GSO_TCPV6;
1632 tpa_info->rss_hash =
1633 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1634 } else {
1635 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1636 tpa_info->gso_type = 0;
1637 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1638 }
1639 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1640 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1641 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1642 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1643 else
1644 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1645 tpa_info->agg_count = 0;
1646
1647 rxr->rx_prod = NEXT_RX(prod);
1648 cons = RING_RX(bp, NEXT_RX(cons));
1649 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1650 cons_rx_buf = &rxr->rx_buf_ring[cons];
1651
1652 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1653 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1654 cons_rx_buf->data = NULL;
1655 }
1656
1657 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1658 {
1659 if (agg_bufs)
1660 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1661 }
1662
1663 #ifdef CONFIG_INET
1664 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1665 {
1666 struct udphdr *uh = NULL;
1667
1668 if (ip_proto == htons(ETH_P_IP)) {
1669 struct iphdr *iph = (struct iphdr *)skb->data;
1670
1671 if (iph->protocol == IPPROTO_UDP)
1672 uh = (struct udphdr *)(iph + 1);
1673 } else {
1674 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1675
1676 if (iph->nexthdr == IPPROTO_UDP)
1677 uh = (struct udphdr *)(iph + 1);
1678 }
1679 if (uh) {
1680 if (uh->check)
1681 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1682 else
1683 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1684 }
1685 }
1686 #endif
1687
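/* Per-chip GRO fixup helpers.  The hardware aggregates TCP segments but
 * leaves the header offsets and the TCP pseudo checksum for the driver to
 * reconstruct before the skb is handed to tcp_gro_complete().  Each variant
 * below matches the header layout reported by a particular chip family.
 */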
1688 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1689 int payload_off, int tcp_ts,
1690 struct sk_buff *skb)
1691 {
1692 #ifdef CONFIG_INET
1693 struct tcphdr *th;
1694 int len, nw_off;
1695 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1696 u32 hdr_info = tpa_info->hdr_info;
1697 bool loopback = false;
1698
1699 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1700 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1701 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1702
1703 /* If the packet is an internal loopback packet, the offsets will
1704 * have an extra 4 bytes.
1705 */
1706 if (inner_mac_off == 4) {
1707 loopback = true;
1708 } else if (inner_mac_off > 4) {
1709 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1710 ETH_HLEN - 2));
1711
1712 		/* We only support inner IPv4/IPv6.  If we don't see the
1713 * correct protocol ID, it must be a loopback packet where
1714 * the offsets are off by 4.
1715 */
1716 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1717 loopback = true;
1718 }
1719 if (loopback) {
1720 /* internal loopback packet, subtract all offsets by 4 */
1721 inner_ip_off -= 4;
1722 inner_mac_off -= 4;
1723 outer_ip_off -= 4;
1724 }
1725
1726 nw_off = inner_ip_off - ETH_HLEN;
1727 skb_set_network_header(skb, nw_off);
1728 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1729 struct ipv6hdr *iph = ipv6_hdr(skb);
1730
1731 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1732 len = skb->len - skb_transport_offset(skb);
1733 th = tcp_hdr(skb);
1734 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1735 } else {
1736 struct iphdr *iph = ip_hdr(skb);
1737
1738 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1739 len = skb->len - skb_transport_offset(skb);
1740 th = tcp_hdr(skb);
1741 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1742 }
1743
1744 if (inner_mac_off) { /* tunnel */
1745 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1746 ETH_HLEN - 2));
1747
1748 bnxt_gro_tunnel(skb, proto);
1749 }
1750 #endif
1751 return skb;
1752 }
1753
1754 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1755 int payload_off, int tcp_ts,
1756 struct sk_buff *skb)
1757 {
1758 #ifdef CONFIG_INET
1759 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1760 u32 hdr_info = tpa_info->hdr_info;
1761 int iphdr_len, nw_off;
1762
1763 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1764 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1765 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1766
1767 nw_off = inner_ip_off - ETH_HLEN;
1768 skb_set_network_header(skb, nw_off);
1769 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1770 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1771 skb_set_transport_header(skb, nw_off + iphdr_len);
1772
1773 if (inner_mac_off) { /* tunnel */
1774 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1775 ETH_HLEN - 2));
1776
1777 bnxt_gro_tunnel(skb, proto);
1778 }
1779 #endif
1780 return skb;
1781 }
1782
1783 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1784 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1785
1786 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1787 int payload_off, int tcp_ts,
1788 struct sk_buff *skb)
1789 {
1790 #ifdef CONFIG_INET
1791 struct tcphdr *th;
1792 int len, nw_off, tcp_opt_len = 0;
1793
1794 if (tcp_ts)
1795 tcp_opt_len = 12;
1796
1797 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1798 struct iphdr *iph;
1799
1800 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1801 ETH_HLEN;
1802 skb_set_network_header(skb, nw_off);
1803 iph = ip_hdr(skb);
1804 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1805 len = skb->len - skb_transport_offset(skb);
1806 th = tcp_hdr(skb);
1807 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1808 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1809 struct ipv6hdr *iph;
1810
1811 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1812 ETH_HLEN;
1813 skb_set_network_header(skb, nw_off);
1814 iph = ipv6_hdr(skb);
1815 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1816 len = skb->len - skb_transport_offset(skb);
1817 th = tcp_hdr(skb);
1818 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1819 } else {
1820 dev_kfree_skb_any(skb);
1821 return NULL;
1822 }
1823
1824 if (nw_off) /* tunnel */
1825 bnxt_gro_tunnel(skb, skb->protocol);
1826 #endif
1827 return skb;
1828 }
1829
1830 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1831 struct bnxt_tpa_info *tpa_info,
1832 struct rx_tpa_end_cmp *tpa_end,
1833 struct rx_tpa_end_cmp_ext *tpa_end1,
1834 struct sk_buff *skb,
1835 struct bnxt_rx_sw_stats *rx_stats)
1836 {
1837 #ifdef CONFIG_INET
1838 int payload_off;
1839 u16 segs;
1840
1841 segs = TPA_END_TPA_SEGS(tpa_end);
1842 if (segs == 1)
1843 return skb;
1844
1845 rx_stats->rx_hw_gro_packets++;
1846 rx_stats->rx_hw_gro_wire_packets += segs;
1847
1848 NAPI_GRO_CB(skb)->count = segs;
1849 skb_shinfo(skb)->gso_size =
1850 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1851 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1852 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1853 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1854 else
1855 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1856 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1857 if (likely(skb))
1858 tcp_gro_complete(skb);
1859 #endif
1860 return skb;
1861 }
1862
1863 /* Given the cfa_code of a received packet, determine which
1864 * netdev (vf-rep or PF) the packet is destined to.
1865 */
1866 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1867 {
1868 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1869
1870 /* if vf-rep dev is NULL, it must belong to the PF */
1871 return dev ? dev : bp->dev;
1872 }
1873
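/* Complete a TPA aggregation: validate the aggregation buffers, build the
 * head skb (copying small packets, otherwise swapping in a fresh buffer),
 * attach the aggregation pages as frags, apply VLAN/RSS/checksum metadata,
 * and finally run the GRO fixup if the aggregation was marked for GRO.
 */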
1874 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1875 struct bnxt_cp_ring_info *cpr,
1876 u32 *raw_cons,
1877 struct rx_tpa_end_cmp *tpa_end,
1878 struct rx_tpa_end_cmp_ext *tpa_end1,
1879 u8 *event)
1880 {
1881 struct bnxt_napi *bnapi = cpr->bnapi;
1882 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1883 struct net_device *dev = bp->dev;
1884 u8 *data_ptr, agg_bufs;
1885 unsigned int len;
1886 struct bnxt_tpa_info *tpa_info;
1887 dma_addr_t mapping;
1888 struct sk_buff *skb;
1889 u16 idx = 0, agg_id;
1890 void *data;
1891 bool gro;
1892
1893 if (unlikely(bnapi->in_reset)) {
1894 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1895
1896 if (rc < 0)
1897 return ERR_PTR(-EBUSY);
1898 return NULL;
1899 }
1900
1901 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1902 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1903 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1904 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1905 tpa_info = &rxr->rx_tpa[agg_id];
1906 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1907 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1908 agg_bufs, tpa_info->agg_count);
1909 agg_bufs = tpa_info->agg_count;
1910 }
1911 tpa_info->agg_count = 0;
1912 *event |= BNXT_AGG_EVENT;
1913 bnxt_free_agg_idx(rxr, agg_id);
1914 idx = agg_id;
1915 gro = !!(bp->flags & BNXT_FLAG_GRO);
1916 } else {
1917 agg_id = TPA_END_AGG_ID(tpa_end);
1918 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1919 tpa_info = &rxr->rx_tpa[agg_id];
1920 idx = RING_CMP(*raw_cons);
1921 if (agg_bufs) {
1922 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1923 return ERR_PTR(-EBUSY);
1924
1925 *event |= BNXT_AGG_EVENT;
1926 idx = NEXT_CMP(idx);
1927 }
1928 gro = !!TPA_END_GRO(tpa_end);
1929 }
1930 data = tpa_info->data;
1931 data_ptr = tpa_info->data_ptr;
1932 prefetch(data_ptr);
1933 len = tpa_info->len;
1934 mapping = tpa_info->mapping;
1935
1936 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1937 bnxt_abort_tpa(cpr, idx, agg_bufs);
1938 if (agg_bufs > MAX_SKB_FRAGS)
1939 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1940 agg_bufs, (int)MAX_SKB_FRAGS);
1941 return NULL;
1942 }
1943
1944 if (len <= bp->rx_copybreak) {
1945 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1946 if (!skb) {
1947 bnxt_abort_tpa(cpr, idx, agg_bufs);
1948 cpr->sw_stats->rx.rx_oom_discards += 1;
1949 return NULL;
1950 }
1951 } else {
1952 u8 *new_data;
1953 dma_addr_t new_mapping;
1954
1955 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1956 GFP_ATOMIC);
1957 if (!new_data) {
1958 bnxt_abort_tpa(cpr, idx, agg_bufs);
1959 cpr->sw_stats->rx.rx_oom_discards += 1;
1960 return NULL;
1961 }
1962
1963 tpa_info->data = new_data;
1964 tpa_info->data_ptr = new_data + bp->rx_offset;
1965 tpa_info->mapping = new_mapping;
1966
1967 skb = napi_build_skb(data, bp->rx_buf_size);
1968 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1969 bp->rx_buf_use_size, bp->rx_dir);
1970
1971 if (!skb) {
1972 page_pool_free_va(rxr->head_pool, data, true);
1973 bnxt_abort_tpa(cpr, idx, agg_bufs);
1974 cpr->sw_stats->rx.rx_oom_discards += 1;
1975 return NULL;
1976 }
1977 skb_mark_for_recycle(skb);
1978 skb_reserve(skb, bp->rx_offset);
1979 skb_put(skb, len);
1980 }
1981
1982 if (agg_bufs) {
1983 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1984 true);
1985 if (!skb) {
1986 /* Page reuse already handled by bnxt_rx_pages(). */
1987 cpr->sw_stats->rx.rx_oom_discards += 1;
1988 return NULL;
1989 }
1990 }
1991
1992 if (tpa_info->cfa_code_valid)
1993 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1994 skb->protocol = eth_type_trans(skb, dev);
1995
1996 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1997 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1998
1999 if (tpa_info->vlan_valid &&
2000 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
2001 __be16 vlan_proto = htons(tpa_info->metadata >>
2002 RX_CMP_FLAGS2_METADATA_TPID_SFT);
2003 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2004
2005 if (eth_type_vlan(vlan_proto)) {
2006 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2007 } else {
2008 dev_kfree_skb(skb);
2009 return NULL;
2010 }
2011 }
2012
2013 skb_checksum_none_assert(skb);
2014 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
2015 skb->ip_summed = CHECKSUM_UNNECESSARY;
2016 skb->csum_level =
2017 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
2018 }
2019
2020 if (gro)
2021 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
2022 &cpr->sw_stats->rx);
2023
2024 return skb;
2025 }
2026
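/* Buffer a TPA aggregation completion in the per-aggregation array until
 * the matching TPA end completion arrives; the entries are consumed in
 * bnxt_tpa_end().
 */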
2027 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
2028 struct rx_agg_cmp *rx_agg)
2029 {
2030 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
2031 struct bnxt_tpa_info *tpa_info;
2032
2033 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2034 tpa_info = &rxr->rx_tpa[agg_id];
2035 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2036 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2037 }
2038
2039 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2040 struct sk_buff *skb)
2041 {
2042 skb_mark_for_recycle(skb);
2043
2044 if (skb->dev != bp->dev) {
2045 /* this packet belongs to a vf-rep */
2046 bnxt_vf_rep_rx(bp, skb);
2047 return;
2048 }
2049 skb_record_rx_queue(skb, bnapi->index);
2050 napi_gro_receive(&bnapi->napi, skb);
2051 }
2052
2053 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2054 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2055 {
2056 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2057
2058 if (BNXT_PTP_RX_TS_VALID(flags))
2059 goto ts_valid;
2060 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2061 return false;
2062
2063 ts_valid:
2064 *cmpl_ts = ts;
2065 return true;
2066 }
2067
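/* Extract the VLAN tag from an RX completion.  Legacy completions carry the
 * TPID and TCI in the flags2/meta_data words; V3 completions report a TPID
 * selector and TCI instead.  An unrecognized TPID causes the packet to be
 * dropped.
 */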
2068 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2069 struct rx_cmp *rxcmp,
2070 struct rx_cmp_ext *rxcmp1)
2071 {
2072 __be16 vlan_proto;
2073 u16 vtag;
2074
2075 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2076 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2077 u32 meta_data;
2078
2079 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2080 return skb;
2081
2082 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2083 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2084 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2085 if (eth_type_vlan(vlan_proto))
2086 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2087 else
2088 goto vlan_err;
2089 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2090 if (RX_CMP_VLAN_VALID(rxcmp)) {
2091 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2092
2093 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2094 vlan_proto = htons(ETH_P_8021Q);
2095 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2096 vlan_proto = htons(ETH_P_8021AD);
2097 else
2098 goto vlan_err;
2099 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2100 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2101 }
2102 }
2103 return skb;
2104 vlan_err:
2105 skb_mark_for_recycle(skb);
2106 dev_kfree_skb(skb);
2107 return NULL;
2108 }
2109
2110 /* returns the following:
2111 * 1 - 1 packet successfully received
2112 * 0 - successful TPA_START, packet not completed yet
2113 * -EBUSY - completion ring does not have all the agg buffers yet
2114 * -ENOMEM - packet aborted due to out of memory
2115 * -EIO - packet aborted due to hw error indicated in BD
2116 */
2117 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2118 u32 *raw_cons, u8 *event)
2119 {
2120 struct bnxt_napi *bnapi = cpr->bnapi;
2121 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2122 struct net_device *dev = bp->dev;
2123 struct rx_cmp *rxcmp;
2124 struct rx_cmp_ext *rxcmp1;
2125 u32 tmp_raw_cons = *raw_cons;
2126 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2127 struct skb_shared_info *sinfo;
2128 struct bnxt_xdp_buff bnxt_xdp;
2129 struct bnxt_sw_rx_bd *rx_buf;
2130 unsigned int len;
2131 u8 *data_ptr, agg_bufs, cmp_type;
2132 bool xdp_active = false;
2133 dma_addr_t dma_addr;
2134 struct sk_buff *skb;
2135 u32 flags, misc;
2136 u32 cmpl_ts;
2137 void *data;
2138 int rc = 0;
2139
2140 rxcmp = (struct rx_cmp *)
2141 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2142
2143 cmp_type = RX_CMP_TYPE(rxcmp);
2144
2145 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2146 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2147 goto next_rx_no_prod_no_len;
2148 }
2149
2150 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2151 cp_cons = RING_CMP(tmp_raw_cons);
2152 rxcmp1 = (struct rx_cmp_ext *)
2153 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2154
2155 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2156 return -EBUSY;
2157
2158 /* The valid test of the entry must be done first before
2159 * reading any further.
2160 */
2161 dma_rmb();
2162 prod = rxr->rx_prod;
2163
2164 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2165 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2166 bnxt_tpa_start(bp, rxr, cmp_type,
2167 (struct rx_tpa_start_cmp *)rxcmp,
2168 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2169
2170 *event |= BNXT_RX_EVENT;
2171 goto next_rx_no_prod_no_len;
2172
2173 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2174 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2175 (struct rx_tpa_end_cmp *)rxcmp,
2176 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2177
2178 if (IS_ERR(skb))
2179 return -EBUSY;
2180
2181 rc = -ENOMEM;
2182 if (likely(skb)) {
2183 bnxt_deliver_skb(bp, bnapi, skb);
2184 rc = 1;
2185 }
2186 *event |= BNXT_RX_EVENT;
2187 goto next_rx_no_prod_no_len;
2188 }
2189
2190 cons = rxcmp->rx_cmp_opaque;
2191 if (unlikely(cons != rxr->rx_next_cons)) {
2192 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2193
2194 /* 0xffff is forced error, don't print it */
2195 if (rxr->rx_next_cons != 0xffff)
2196 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2197 cons, rxr->rx_next_cons);
2198 bnxt_sched_reset_rxr(bp, rxr);
2199 if (rc1)
2200 return rc1;
2201 goto next_rx_no_prod_no_len;
2202 }
2203 rx_buf = &rxr->rx_buf_ring[cons];
2204 data = rx_buf->data;
2205 data_ptr = rx_buf->data_ptr;
2206 prefetch(data_ptr);
2207
2208 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2209 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2210
2211 if (agg_bufs) {
2212 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2213 return -EBUSY;
2214
2215 cp_cons = NEXT_CMP(cp_cons);
2216 *event |= BNXT_AGG_EVENT;
2217 }
2218 *event |= BNXT_RX_EVENT;
2219
2220 rx_buf->data = NULL;
2221 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2222 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2223
2224 bnxt_reuse_rx_data(rxr, cons, data);
2225 if (agg_bufs)
2226 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2227 false);
2228
2229 rc = -EIO;
2230 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2231 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2232 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2233 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2234 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2235 rx_err);
2236 bnxt_sched_reset_rxr(bp, rxr);
2237 }
2238 }
2239 goto next_rx_no_len;
2240 }
2241
2242 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2243 len = flags >> RX_CMP_LEN_SHIFT;
2244 dma_addr = rx_buf->mapping;
2245
2246 if (bnxt_xdp_attached(bp, rxr)) {
2247 bnxt_xdp.rxcmp = rxcmp;
2248 bnxt_xdp.rxcmp1 = rxcmp1;
2249 bnxt_xdp.cmp_type = cmp_type;
2250
2251 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &bnxt_xdp.xdp);
2252 if (agg_bufs) {
2253 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr,
2254 &bnxt_xdp.xdp,
2255 cp_cons,
2256 agg_bufs,
2257 false);
2258 if (!frag_len)
2259 goto oom_next_rx;
2260
2261 }
2262 xdp_active = true;
2263 }
2264
2265 if (xdp_active) {
2266 if (bnxt_rx_xdp(bp, rxr, cons, &bnxt_xdp.xdp, data, &data_ptr,
2267 &len, event)) {
2268 rc = 1;
2269 goto next_rx;
2270 }
2271 if (xdp_buff_has_frags(&bnxt_xdp.xdp)) {
2272 sinfo = xdp_get_shared_info_from_buff(&bnxt_xdp.xdp);
2273 agg_bufs = sinfo->nr_frags;
2274 } else {
2275 agg_bufs = 0;
2276 }
2277 }
2278
2279 if (len <= bp->rx_copybreak) {
2280 if (!xdp_active)
2281 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2282 else
2283 skb = bnxt_copy_xdp(bnapi, &bnxt_xdp.xdp, len,
2284 dma_addr);
2285 bnxt_reuse_rx_data(rxr, cons, data);
2286 if (!skb) {
2287 if (agg_bufs) {
2288 if (!xdp_active)
2289 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2290 agg_bufs, false);
2291 else
2292 bnxt_xdp_buff_frags_free(rxr,
2293 &bnxt_xdp.xdp);
2294 }
2295 goto oom_next_rx;
2296 }
2297 } else {
2298 u32 payload;
2299
2300 if (rx_buf->data_ptr == data_ptr)
2301 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2302 else
2303 payload = 0;
2304 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2305 payload | len);
2306 if (!skb)
2307 goto oom_next_rx;
2308 }
2309
2310 if (agg_bufs) {
2311 if (!xdp_active) {
2312 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2313 agg_bufs, false);
2314 if (!skb)
2315 goto oom_next_rx;
2316 } else {
2317 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2318 rxr, &bnxt_xdp.xdp);
2319 if (!skb) {
2320 /* we should be able to free the old skb here */
2321 bnxt_xdp_buff_frags_free(rxr, &bnxt_xdp.xdp);
2322 goto oom_next_rx;
2323 }
2324 }
2325 }
2326
2327 if (RX_CMP_HASH_VALID(rxcmp)) {
2328 enum pkt_hash_types type;
2329
2330 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2331 type = bnxt_rss_ext_op(bp, rxcmp);
2332 } else {
2333 u32 itypes = RX_CMP_ITYPES(rxcmp);
2334
2335 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2336 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2337 type = PKT_HASH_TYPE_L4;
2338 else
2339 type = PKT_HASH_TYPE_L3;
2340 }
2341 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2342 }
2343
2344 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2345 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2346 skb->protocol = eth_type_trans(skb, dev);
2347
2348 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2349 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2350 if (!skb)
2351 goto next_rx;
2352 }
2353
2354 skb_checksum_none_assert(skb);
2355 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2356 if (dev->features & NETIF_F_RXCSUM) {
2357 skb->ip_summed = CHECKSUM_UNNECESSARY;
2358 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2359 }
2360 } else {
2361 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2362 if (dev->features & NETIF_F_RXCSUM)
2363 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2364 }
2365 }
2366
2367 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2368 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2369 u64 ns, ts;
2370
2371 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2372 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2373
2374 ns = bnxt_timecounter_cyc2time(ptp, ts);
2375 memset(skb_hwtstamps(skb), 0,
2376 sizeof(*skb_hwtstamps(skb)));
2377 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2378 }
2379 }
2380 }
2381 bnxt_deliver_skb(bp, bnapi, skb);
2382 rc = 1;
2383
2384 next_rx:
2385 cpr->rx_packets += 1;
2386 cpr->rx_bytes += len;
2387
2388 next_rx_no_len:
2389 rxr->rx_prod = NEXT_RX(prod);
2390 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2391
2392 next_rx_no_prod_no_len:
2393 *raw_cons = tmp_raw_cons;
2394
2395 return rc;
2396
2397 oom_next_rx:
2398 cpr->sw_stats->rx.rx_oom_discards += 1;
2399 rc = -ENOMEM;
2400 goto next_rx;
2401 }
2402
2403 /* In netpoll mode, if we are using a combined completion ring, we need to
2404 * discard the rx packets and recycle the buffers.
2405 */
2406 static int bnxt_force_rx_discard(struct bnxt *bp,
2407 struct bnxt_cp_ring_info *cpr,
2408 u32 *raw_cons, u8 *event)
2409 {
2410 u32 tmp_raw_cons = *raw_cons;
2411 struct rx_cmp_ext *rxcmp1;
2412 struct rx_cmp *rxcmp;
2413 u16 cp_cons;
2414 u8 cmp_type;
2415 int rc;
2416
2417 cp_cons = RING_CMP(tmp_raw_cons);
2418 rxcmp = (struct rx_cmp *)
2419 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2420
2421 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2422 cp_cons = RING_CMP(tmp_raw_cons);
2423 rxcmp1 = (struct rx_cmp_ext *)
2424 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2425
2426 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2427 return -EBUSY;
2428
2429 /* The valid test of the entry must be done first before
2430 * reading any further.
2431 */
2432 dma_rmb();
2433 cmp_type = RX_CMP_TYPE(rxcmp);
2434 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2435 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2436 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2437 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2438 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2439 struct rx_tpa_end_cmp_ext *tpa_end1;
2440
2441 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2442 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2443 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2444 }
2445 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2446 if (rc && rc != -EBUSY)
2447 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2448 return rc;
2449 }
2450
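/* Read a firmware health register.  The register descriptor encodes where
 * the value lives (PCI config space, a mapped GRC window, BAR0 or BAR1) and
 * the offset within that space.
 */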
2451 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2452 {
2453 struct bnxt_fw_health *fw_health = bp->fw_health;
2454 u32 reg = fw_health->regs[reg_idx];
2455 u32 reg_type, reg_off, val = 0;
2456
2457 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2458 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2459 switch (reg_type) {
2460 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2461 pci_read_config_dword(bp->pdev, reg_off, &val);
2462 break;
2463 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2464 reg_off = fw_health->mapped_regs[reg_idx];
2465 fallthrough;
2466 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2467 val = readl(bp->bar0 + reg_off);
2468 break;
2469 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2470 val = readl(bp->bar1 + reg_off);
2471 break;
2472 }
2473 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2474 val &= fw_health->fw_reset_inprog_reg_mask;
2475 return val;
2476 }
2477
2478 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2479 {
2480 int i;
2481
2482 for (i = 0; i < bp->rx_nr_rings; i++) {
2483 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2484 struct bnxt_ring_grp_info *grp_info;
2485
2486 grp_info = &bp->grp_info[grp_idx];
2487 if (grp_info->agg_fw_ring_id == ring_id)
2488 return grp_idx;
2489 }
2490 return INVALID_HW_RING_ID;
2491 }
2492
2493 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2494 {
2495 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2496
2497 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2498 return link_info->force_link_speed2;
2499 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2500 return link_info->force_pam4_link_speed;
2501 return link_info->force_link_speed;
2502 }
2503
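/* Translate the configured forced speed into the requested link speed and
 * signal mode.  On SPEEDS2-capable PHYs a single speed value implies the
 * signal mode; otherwise NRZ and PAM4 speeds are tracked separately.
 */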
2504 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2505 {
2506 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2507
2508 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2509 link_info->req_link_speed = link_info->force_link_speed2;
2510 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2511 switch (link_info->req_link_speed) {
2512 case BNXT_LINK_SPEED_50GB_PAM4:
2513 case BNXT_LINK_SPEED_100GB_PAM4:
2514 case BNXT_LINK_SPEED_200GB_PAM4:
2515 case BNXT_LINK_SPEED_400GB_PAM4:
2516 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2517 break;
2518 case BNXT_LINK_SPEED_100GB_PAM4_112:
2519 case BNXT_LINK_SPEED_200GB_PAM4_112:
2520 case BNXT_LINK_SPEED_400GB_PAM4_112:
2521 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2522 break;
2523 default:
2524 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2525 }
2526 return;
2527 }
2528 link_info->req_link_speed = link_info->force_link_speed;
2529 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2530 if (link_info->force_pam4_link_speed) {
2531 link_info->req_link_speed = link_info->force_pam4_link_speed;
2532 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2533 }
2534 }
2535
2536 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2537 {
2538 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2539
2540 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2541 link_info->advertising = link_info->auto_link_speeds2;
2542 return;
2543 }
2544 link_info->advertising = link_info->auto_link_speeds;
2545 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2546 }
2547
2548 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2549 {
2550 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2551
2552 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2553 if (link_info->req_link_speed != link_info->force_link_speed2)
2554 return true;
2555 return false;
2556 }
2557 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2558 link_info->req_link_speed != link_info->force_link_speed)
2559 return true;
2560 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2561 link_info->req_link_speed != link_info->force_pam4_link_speed)
2562 return true;
2563 return false;
2564 }
2565
2566 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2567 {
2568 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2569
2570 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2571 if (link_info->advertising != link_info->auto_link_speeds2)
2572 return true;
2573 return false;
2574 }
2575 if (link_info->advertising != link_info->auto_link_speeds ||
2576 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2577 return true;
2578 return false;
2579 }
2580
2581 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2582 {
2583 u32 flags = bp->ctx->ctx_arr[type].flags;
2584
2585 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2586 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2587 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2588 }
2589
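/* Set up a backing-store trace buffer: locate the last byte of the last
 * page of the context memory and seed it with a magic value.  This
 * presumably lets bnxt_bs_trace_check_wrap() detect later whether the
 * firmware trace has written past the end of the buffer and wrapped.
 */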
2590 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2591 {
2592 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2593 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2594 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2595 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2596 struct bnxt_bs_trace_info *bs_trace;
2597 int last_pg;
2598
2599 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2600 return;
2601
2602 mem_size = ctxm->max_entries * ctxm->entry_size;
2603 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2604 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2605
2606 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2607 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2608
2609 rmem = &ctx_pg[0].ring_mem;
2610 bs_trace = &bp->bs_trace[trace_type];
2611 bs_trace->ctx_type = ctxm->type;
2612 bs_trace->trace_type = trace_type;
2613 if (pages > MAX_CTX_PAGES) {
2614 int last_pg_dir = rmem->nr_pages - 1;
2615
2616 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2617 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2618 } else {
2619 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2620 }
2621 bs_trace->magic_byte += magic_byte_offset;
2622 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2623 }
2624
2625 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2626 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2627 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2628
2629 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2630 (((data2) & \
2631 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2632 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2633
2634 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2635 ((data2) & \
2636 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2637
2638 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2639 (((data2) & \
2640 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2641 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2642
2643 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2644 ((data1) & \
2645 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2646
2647 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2648 (((data1) & \
2649 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2650 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2651
2652 /* Return true if the workqueue has to be scheduled */
2653 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2654 {
2655 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2656
2657 switch (err_type) {
2658 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2659 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2660 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2661 break;
2662 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2663 netdev_warn(bp->dev, "Pause Storm detected!\n");
2664 break;
2665 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2666 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2667 break;
2668 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2669 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2670 char *threshold_type;
2671 bool notify = false;
2672 char *dir_str;
2673
2674 switch (type) {
2675 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2676 threshold_type = "warning";
2677 break;
2678 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2679 threshold_type = "critical";
2680 break;
2681 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2682 threshold_type = "fatal";
2683 break;
2684 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2685 threshold_type = "shutdown";
2686 break;
2687 default:
2688 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2689 return false;
2690 }
2691 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2692 dir_str = "above";
2693 notify = true;
2694 } else {
2695 dir_str = "below";
2696 }
2697 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2698 dir_str, threshold_type);
2699 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2700 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2701 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2702 if (notify) {
2703 bp->thermal_threshold_type = type;
2704 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2705 return true;
2706 }
2707 return false;
2708 }
2709 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2710 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2711 break;
2712 default:
2713 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2714 err_type);
2715 break;
2716 }
2717 return false;
2718 }
2719
2720 #define BNXT_GET_EVENT_PORT(data) \
2721 ((data) & \
2722 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2723
2724 #define BNXT_EVENT_RING_TYPE(data2) \
2725 ((data2) & \
2726 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2727
2728 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2729 (BNXT_EVENT_RING_TYPE(data2) == \
2730 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2731
2732 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2733 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2734 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2735
2736 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2737 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2738 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2739
2740 #define BNXT_PHC_BITS 48
2741
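/* Decode a firmware async event completion.  Most events set a bit in
 * bp->sp_event and fall through to __bnxt_queue_sp_work(); events that are
 * handled completely here jump to async_event_process_exit, which still
 * forwards the completion to the ULPs.
 */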
2742 static int bnxt_async_event_process(struct bnxt *bp,
2743 struct hwrm_async_event_cmpl *cmpl)
2744 {
2745 u16 event_id = le16_to_cpu(cmpl->event_id);
2746 u32 data1 = le32_to_cpu(cmpl->event_data1);
2747 u32 data2 = le32_to_cpu(cmpl->event_data2);
2748
2749 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2750 event_id, data1, data2);
2751
2752 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2753 switch (event_id) {
2754 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2755 struct bnxt_link_info *link_info = &bp->link_info;
2756
2757 if (BNXT_VF(bp))
2758 goto async_event_process_exit;
2759
2760 /* print unsupported speed warning in forced speed mode only */
2761 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2762 (data1 & 0x20000)) {
2763 u16 fw_speed = bnxt_get_force_speed(link_info);
2764 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2765
2766 if (speed != SPEED_UNKNOWN)
2767 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2768 speed);
2769 }
2770 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2771 }
2772 fallthrough;
2773 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2774 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2775 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2776 fallthrough;
2777 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2778 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2779 break;
2780 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2781 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2782 break;
2783 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2784 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2785
2786 if (BNXT_VF(bp))
2787 break;
2788
2789 if (bp->pf.port_id != port_id)
2790 break;
2791
2792 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2793 break;
2794 }
2795 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2796 if (BNXT_PF(bp))
2797 goto async_event_process_exit;
2798 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2799 break;
2800 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2801 char *type_str = "Solicited";
2802
2803 if (!bp->fw_health)
2804 goto async_event_process_exit;
2805
2806 bp->fw_reset_timestamp = jiffies;
2807 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2808 if (!bp->fw_reset_min_dsecs)
2809 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2810 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2811 if (!bp->fw_reset_max_dsecs)
2812 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2813 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2814 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2815 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2816 type_str = "Fatal";
2817 bp->fw_health->fatalities++;
2818 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2819 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2820 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2821 type_str = "Non-fatal";
2822 bp->fw_health->survivals++;
2823 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2824 }
2825 netif_warn(bp, hw, bp->dev,
2826 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2827 type_str, data1, data2,
2828 bp->fw_reset_min_dsecs * 100,
2829 bp->fw_reset_max_dsecs * 100);
2830 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2831 break;
2832 }
2833 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2834 struct bnxt_fw_health *fw_health = bp->fw_health;
2835 char *status_desc = "healthy";
2836 u32 status;
2837
2838 if (!fw_health)
2839 goto async_event_process_exit;
2840
2841 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2842 fw_health->enabled = false;
2843 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2844 break;
2845 }
2846 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2847 fw_health->tmr_multiplier =
2848 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2849 bp->current_interval * 10);
2850 fw_health->tmr_counter = fw_health->tmr_multiplier;
2851 if (!fw_health->enabled)
2852 fw_health->last_fw_heartbeat =
2853 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2854 fw_health->last_fw_reset_cnt =
2855 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2856 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2857 if (status != BNXT_FW_STATUS_HEALTHY)
2858 status_desc = "unhealthy";
2859 netif_info(bp, drv, bp->dev,
2860 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2861 fw_health->primary ? "primary" : "backup", status,
2862 status_desc, fw_health->last_fw_reset_cnt);
2863 if (!fw_health->enabled) {
2864 /* Make sure tmr_counter is set and visible to
2865 * bnxt_health_check() before setting enabled to true.
2866 */
2867 smp_wmb();
2868 fw_health->enabled = true;
2869 }
2870 goto async_event_process_exit;
2871 }
2872 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2873 netif_notice(bp, hw, bp->dev,
2874 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2875 data1, data2);
2876 goto async_event_process_exit;
2877 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2878 struct bnxt_rx_ring_info *rxr;
2879 u16 grp_idx;
2880
2881 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2882 goto async_event_process_exit;
2883
2884 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2885 BNXT_EVENT_RING_TYPE(data2), data1);
2886 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2887 goto async_event_process_exit;
2888
2889 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2890 if (grp_idx == INVALID_HW_RING_ID) {
2891 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2892 data1);
2893 goto async_event_process_exit;
2894 }
2895 rxr = bp->bnapi[grp_idx]->rx_ring;
2896 bnxt_sched_reset_rxr(bp, rxr);
2897 goto async_event_process_exit;
2898 }
2899 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2900 struct bnxt_fw_health *fw_health = bp->fw_health;
2901
2902 netif_notice(bp, hw, bp->dev,
2903 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2904 data1, data2);
2905 if (fw_health) {
2906 fw_health->echo_req_data1 = data1;
2907 fw_health->echo_req_data2 = data2;
2908 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2909 break;
2910 }
2911 goto async_event_process_exit;
2912 }
2913 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2914 bnxt_ptp_pps_event(bp, data1, data2);
2915 goto async_event_process_exit;
2916 }
2917 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2918 if (bnxt_event_error_report(bp, data1, data2))
2919 break;
2920 goto async_event_process_exit;
2921 }
2922 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2923 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2924 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2925 if (BNXT_PTP_USE_RTC(bp)) {
2926 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2927 unsigned long flags;
2928 u64 ns;
2929
2930 if (!ptp)
2931 goto async_event_process_exit;
2932
2933 bnxt_ptp_update_current_time(bp);
2934 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2935 BNXT_PHC_BITS) | ptp->current_time);
2936 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2937 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2938 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2939 }
2940 break;
2941 }
2942 goto async_event_process_exit;
2943 }
2944 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2945 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2946
2947 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2948 goto async_event_process_exit;
2949 }
2950 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2951 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2952 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2953
2954 if (type >= ARRAY_SIZE(bp->bs_trace))
2955 goto async_event_process_exit;
2956 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2957 goto async_event_process_exit;
2958 }
2959 default:
2960 goto async_event_process_exit;
2961 }
2962 __bnxt_queue_sp_work(bp);
2963 async_event_process_exit:
2964 bnxt_ulp_async_events(bp, cmpl);
2965 return 0;
2966 }
2967
2968 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2969 {
2970 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2971 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2972 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2973 (struct hwrm_fwd_req_cmpl *)txcmp;
2974
2975 switch (cmpl_type) {
2976 case CMPL_BASE_TYPE_HWRM_DONE:
2977 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2978 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2979 break;
2980
2981 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2982 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2983
2984 if ((vf_id < bp->pf.first_vf_id) ||
2985 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2986 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2987 vf_id);
2988 return -EINVAL;
2989 }
2990
2991 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2992 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2993 break;
2994
2995 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2996 bnxt_async_event_process(bp,
2997 (struct hwrm_async_event_cmpl *)txcmp);
2998 break;
2999
3000 default:
3001 break;
3002 }
3003
3004 return 0;
3005 }
3006
3007 static bool bnxt_vnic_is_active(struct bnxt *bp)
3008 {
3009 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
3010
3011 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
3012 }
3013
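/* MSI-X interrupt handler: count the event, prefetch the next completion
 * descriptor and schedule NAPI.  All real work happens in the poll
 * functions below.
 */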
3014 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
3015 {
3016 struct bnxt_napi *bnapi = dev_instance;
3017 struct bnxt *bp = bnapi->bp;
3018 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3019 u32 cons = RING_CMP(cpr->cp_raw_cons);
3020
3021 cpr->event_ctr++;
3022 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
3023 napi_schedule(&bnapi->napi);
3024 return IRQ_HANDLED;
3025 }
3026
3027 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3028 {
3029 u32 raw_cons = cpr->cp_raw_cons;
3030 u16 cons = RING_CMP(raw_cons);
3031 struct tx_cmp *txcmp;
3032
3033 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3034
3035 return TX_CMP_VALID(txcmp, raw_cons);
3036 }
3037
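/* Core completion-ring poll loop.  Walks the completion ring, dispatching
 * TX, RX/TPA and HWRM completions until the ring is empty or the NAPI
 * budget is consumed, then records the new consumer index and any pending
 * doorbell events in bnapi->events for the caller to act on.
 */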
3038 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3039 int budget)
3040 {
3041 struct bnxt_napi *bnapi = cpr->bnapi;
3042 u32 raw_cons = cpr->cp_raw_cons;
3043 bool flush_xdp = false;
3044 u32 cons;
3045 int rx_pkts = 0;
3046 u8 event = 0;
3047 struct tx_cmp *txcmp;
3048
3049 cpr->has_more_work = 0;
3050 cpr->had_work_done = 1;
3051 while (1) {
3052 u8 cmp_type;
3053 int rc;
3054
3055 cons = RING_CMP(raw_cons);
3056 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3057
3058 if (!TX_CMP_VALID(txcmp, raw_cons))
3059 break;
3060
3061 /* The valid test of the entry must be done first before
3062 * reading any further.
3063 */
3064 dma_rmb();
3065 cmp_type = TX_CMP_TYPE(txcmp);
3066 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3067 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3068 u32 opaque = txcmp->tx_cmp_opaque;
3069 struct bnxt_tx_ring_info *txr;
3070 u16 tx_freed;
3071
3072 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3073 event |= BNXT_TX_CMP_EVENT;
3074 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3075 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3076 else
3077 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3078 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3079 bp->tx_ring_mask;
3080 /* return full budget so NAPI will complete. */
3081 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3082 rx_pkts = budget;
3083 raw_cons = NEXT_RAW_CMP(raw_cons);
3084 if (budget)
3085 cpr->has_more_work = 1;
3086 break;
3087 }
3088 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3089 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3090 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3091 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3092 if (likely(budget))
3093 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3094 else
3095 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3096 &event);
3097 if (event & BNXT_REDIRECT_EVENT)
3098 flush_xdp = true;
3099 if (likely(rc >= 0))
3100 rx_pkts += rc;
3101 /* Increment rx_pkts when rc is -ENOMEM to count towards
3102 * the NAPI budget. Otherwise, we may potentially loop
3103 * here forever if we consistently cannot allocate
3104 * buffers.
3105 */
3106 else if (rc == -ENOMEM && budget)
3107 rx_pkts++;
3108 else if (rc == -EBUSY) /* partial completion */
3109 break;
3110 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3111 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3112 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3113 bnxt_hwrm_handler(bp, txcmp);
3114 }
3115 raw_cons = NEXT_RAW_CMP(raw_cons);
3116
3117 if (rx_pkts && rx_pkts == budget) {
3118 cpr->has_more_work = 1;
3119 break;
3120 }
3121 }
3122
3123 if (flush_xdp) {
3124 xdp_do_flush();
3125 event &= ~BNXT_REDIRECT_EVENT;
3126 }
3127
3128 if (event & BNXT_TX_EVENT) {
3129 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3130 u16 prod = txr->tx_prod;
3131
3132 /* Sync BD data before updating doorbell */
3133 wmb();
3134
3135 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3136 event &= ~BNXT_TX_EVENT;
3137 }
3138
3139 cpr->cp_raw_cons = raw_cons;
3140 bnapi->events |= event;
3141 return rx_pkts;
3142 }
3143
3144 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3145 int budget)
3146 {
3147 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3148 bnapi->tx_int(bp, bnapi, budget);
3149
3150 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3151 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3152
3153 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3154 bnapi->events &= ~BNXT_RX_EVENT;
3155 }
3156 if (bnapi->events & BNXT_AGG_EVENT) {
3157 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3158
3159 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3160 bnapi->events &= ~BNXT_AGG_EVENT;
3161 }
3162 }
3163
3164 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3165 int budget)
3166 {
3167 struct bnxt_napi *bnapi = cpr->bnapi;
3168 int rx_pkts;
3169
3170 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3171
3172 /* ACK completion ring before freeing tx ring and producing new
3173 * buffers in rx/agg rings to prevent overflowing the completion
3174 * ring.
3175 */
3176 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3177
3178 __bnxt_poll_work_done(bp, bnapi, budget);
3179 return rx_pkts;
3180 }
3181
3182 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3183 {
3184 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3185 struct bnxt *bp = bnapi->bp;
3186 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3187 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3188 struct tx_cmp *txcmp;
3189 struct rx_cmp_ext *rxcmp1;
3190 u32 cp_cons, tmp_raw_cons;
3191 u32 raw_cons = cpr->cp_raw_cons;
3192 bool flush_xdp = false;
3193 u32 rx_pkts = 0;
3194 u8 event = 0;
3195
3196 while (1) {
3197 int rc;
3198
3199 cp_cons = RING_CMP(raw_cons);
3200 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3201
3202 if (!TX_CMP_VALID(txcmp, raw_cons))
3203 break;
3204
3205 /* The valid test of the entry must be done first before
3206 * reading any further.
3207 */
3208 dma_rmb();
3209 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3210 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3211 cp_cons = RING_CMP(tmp_raw_cons);
3212 rxcmp1 = (struct rx_cmp_ext *)
3213 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3214
3215 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3216 break;
3217
3218 /* force an error to recycle the buffer */
3219 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3220 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3221
3222 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3223 if (likely(rc == -EIO) && budget)
3224 rx_pkts++;
3225 else if (rc == -EBUSY) /* partial completion */
3226 break;
3227 if (event & BNXT_REDIRECT_EVENT)
3228 flush_xdp = true;
3229 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3230 CMPL_BASE_TYPE_HWRM_DONE)) {
3231 bnxt_hwrm_handler(bp, txcmp);
3232 } else {
3233 netdev_err(bp->dev,
3234 "Invalid completion received on special ring\n");
3235 }
3236 raw_cons = NEXT_RAW_CMP(raw_cons);
3237
3238 if (rx_pkts == budget)
3239 break;
3240 }
3241
3242 cpr->cp_raw_cons = raw_cons;
3243 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3244 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3245
3246 if (event & BNXT_AGG_EVENT)
3247 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3248 if (flush_xdp)
3249 xdp_do_flush();
3250
3251 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3252 napi_complete_done(napi, rx_pkts);
3253 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3254 }
3255 return rx_pkts;
3256 }
3257
3258 static int bnxt_poll(struct napi_struct *napi, int budget)
3259 {
3260 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3261 struct bnxt *bp = bnapi->bp;
3262 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3263 int work_done = 0;
3264
3265 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3266 napi_complete(napi);
3267 return 0;
3268 }
3269 while (1) {
3270 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3271
3272 if (work_done >= budget) {
3273 if (!budget)
3274 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3275 break;
3276 }
3277
3278 if (!bnxt_has_work(bp, cpr)) {
3279 if (napi_complete_done(napi, work_done))
3280 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3281 break;
3282 }
3283 }
3284 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3285 struct dim_sample dim_sample = {};
3286
3287 dim_update_sample(cpr->event_ctr,
3288 cpr->rx_packets,
3289 cpr->rx_bytes,
3290 &dim_sample);
3291 net_dim(&cpr->dim, &dim_sample);
3292 }
3293 return work_done;
3294 }
3295
3296 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3297 {
3298 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3299 int i, work_done = 0;
3300
3301 for (i = 0; i < cpr->cp_ring_count; i++) {
3302 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3303
3304 if (cpr2->had_nqe_notify) {
3305 work_done += __bnxt_poll_work(bp, cpr2,
3306 budget - work_done);
3307 cpr->has_more_work |= cpr2->has_more_work;
3308 }
3309 }
3310 return work_done;
3311 }
3312
3313 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3314 u64 dbr_type, int budget)
3315 {
3316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3317 int i;
3318
3319 for (i = 0; i < cpr->cp_ring_count; i++) {
3320 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3321 struct bnxt_db_info *db;
3322
3323 if (cpr2->had_work_done) {
3324 u32 tgl = 0;
3325
3326 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3327 cpr2->had_nqe_notify = 0;
3328 tgl = cpr2->toggle;
3329 }
3330 db = &cpr2->cp_db;
3331 bnxt_writeq(bp,
3332 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3333 DB_RING_IDX(db, cpr2->cp_raw_cons),
3334 db->doorbell);
3335 cpr2->had_work_done = 0;
3336 }
3337 }
3338 __bnxt_poll_work_done(bp, bnapi, budget);
3339 }
3340
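/* NAPI poll for P5+ chips.  The NQ ring only carries notifications; each CQ
 * notification fans out to the corresponding completion ring in
 * cpr->cp_ring_arr, and the CQ/NQ doorbells are re-armed once all rings
 * report no more work.
 */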
3341 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3342 {
3343 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3344 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3345 struct bnxt_cp_ring_info *cpr_rx;
3346 u32 raw_cons = cpr->cp_raw_cons;
3347 struct bnxt *bp = bnapi->bp;
3348 struct nqe_cn *nqcmp;
3349 int work_done = 0;
3350 u32 cons;
3351
3352 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3353 napi_complete(napi);
3354 return 0;
3355 }
3356 if (cpr->has_more_work) {
3357 cpr->has_more_work = 0;
3358 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3359 }
3360 while (1) {
3361 u16 type;
3362
3363 cons = RING_CMP(raw_cons);
3364 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3365
3366 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3367 if (cpr->has_more_work)
3368 break;
3369
3370 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3371 budget);
3372 cpr->cp_raw_cons = raw_cons;
3373 if (napi_complete_done(napi, work_done))
3374 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3375 cpr->cp_raw_cons);
3376 goto poll_done;
3377 }
3378
3379 /* The validity check of the entry must be done before
3380 * reading any further.
3381 */
3382 dma_rmb();
3383
3384 type = le16_to_cpu(nqcmp->type);
3385 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3386 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3387 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3388 struct bnxt_cp_ring_info *cpr2;
3389
3390 /* No more budget for RX work */
3391 if (budget && work_done >= budget &&
3392 cq_type == BNXT_NQ_HDL_TYPE_RX)
3393 break;
3394
3395 idx = BNXT_NQ_HDL_IDX(idx);
3396 cpr2 = &cpr->cp_ring_arr[idx];
3397 cpr2->had_nqe_notify = 1;
3398 cpr2->toggle = NQE_CN_TOGGLE(type);
3399 work_done += __bnxt_poll_work(bp, cpr2,
3400 budget - work_done);
3401 cpr->has_more_work |= cpr2->has_more_work;
3402 } else {
3403 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3404 }
3405 raw_cons = NEXT_RAW_CMP(raw_cons);
3406 }
3407 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3408 if (raw_cons != cpr->cp_raw_cons) {
3409 cpr->cp_raw_cons = raw_cons;
3410 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3411 }
3412 poll_done:
3413 cpr_rx = &cpr->cp_ring_arr[0];
3414 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3415 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3416 struct dim_sample dim_sample = {};
3417
3418 dim_update_sample(cpr->event_ctr,
3419 cpr_rx->rx_packets,
3420 cpr_rx->rx_bytes,
3421 &dim_sample);
3422 net_dim(&cpr->dim, &dim_sample);
3423 }
3424 return work_done;
3425 }
3426
3427 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3428 struct bnxt_tx_ring_info *txr, int idx)
3429 {
3430 int i, max_idx;
3431 struct pci_dev *pdev = bp->pdev;
3432 unsigned int dma_len;
3433 dma_addr_t dma_addr;
3434
3435 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3436
3437 for (i = 0; i < max_idx;) {
3438 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3439 struct bnxt_sw_tx_bd *head_buf = tx_buf;
3440 struct sk_buff *skb;
3441 int j, last;
3442
3443 if (idx < bp->tx_nr_rings_xdp &&
3444 tx_buf->action == XDP_REDIRECT) {
3445 dma_addr = dma_unmap_addr(tx_buf, mapping);
3446 dma_len = dma_unmap_len(tx_buf, len);
3447
3448 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
3449 DMA_TO_DEVICE);
3450 xdp_return_frame(tx_buf->xdpf);
3451 tx_buf->action = 0;
3452 tx_buf->xdpf = NULL;
3453 i++;
3454 continue;
3455 }
3456
3457 skb = tx_buf->skb;
3458 if (!skb) {
3459 i++;
3460 continue;
3461 }
3462
3463 tx_buf->skb = NULL;
3464
3465 if (tx_buf->is_push) {
3466 dev_kfree_skb(skb);
3467 i += 2;
3468 continue;
3469 }
3470
3471 if (dma_unmap_len(tx_buf, len)) {
3472 dma_addr = dma_unmap_addr(tx_buf, mapping);
3473 dma_len = dma_unmap_len(tx_buf, len);
3474
3475 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
3476 DMA_TO_DEVICE);
3477 }
3478
3479 last = tx_buf->nr_frags;
3480 i += 2;
3481 for (j = 0; j < last; j++, i++) {
3482 int ring_idx = i & bp->tx_ring_mask;
3483
3484 tx_buf = &txr->tx_buf_ring[ring_idx];
3485 if (dma_unmap_len(tx_buf, len)) {
3486 dma_addr = dma_unmap_addr(tx_buf, mapping);
3487 dma_len = dma_unmap_len(tx_buf, len);
3488
3489 netmem_dma_unmap_page_attrs(&pdev->dev,
3490 dma_addr, dma_len,
3491 DMA_TO_DEVICE, 0);
3492 }
3493 }
3494 if (head_buf->is_sw_gso) {
3495 u16 inline_cons = txr->tx_inline_cons + 1;
3496
3497 WRITE_ONCE(txr->tx_inline_cons, inline_cons);
3498 if (head_buf->is_sw_gso == BNXT_SW_GSO_LAST) {
3499 tso_dma_map_complete(&pdev->dev,
3500 &head_buf->sw_gso_cstate);
3501 } else {
3502 skb = NULL;
3503 }
3504 head_buf->is_sw_gso = 0;
3505 }
3506 if (skb)
3507 dev_kfree_skb(skb);
3508 }
3509 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3510 }
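/* Illustrative walk of the TX slot layout assumed by the cleanup above
 * (derived from the index arithmetic in the loop, not a hardware spec):
 * a normal packet's head occupies two ring slots (hence i += 2), followed
 * by one slot per fragment.  For a packet with nr_frags == 2 starting at
 * slot i, the loop touches i (head), i + 2 and i + 3 (fragments) and
 * resumes at i + 4.  Push-mode packets free the skb without any DMA
 * unmapping, and XDP_REDIRECT slots carry an xdp_frame and advance by a
 * single slot.
 */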
3511
3512 static void bnxt_free_tx_skbs(struct bnxt *bp)
3513 {
3514 int i;
3515
3516 if (!bp->tx_ring)
3517 return;
3518
3519 for (i = 0; i < bp->tx_nr_rings; i++) {
3520 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3521
3522 if (!txr->tx_buf_ring)
3523 continue;
3524
3525 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3526 }
3527
3528 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3529 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3530 }
3531
3532 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3533 {
3534 int i, max_idx;
3535
3536 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3537
3538 for (i = 0; i < max_idx; i++) {
3539 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3540 void *data = rx_buf->data;
3541
3542 if (!data)
3543 continue;
3544
3545 rx_buf->data = NULL;
3546 if (BNXT_RX_PAGE_MODE(bp))
3547 page_pool_recycle_direct(rxr->page_pool, data);
3548 else
3549 page_pool_free_va(rxr->head_pool, data, true);
3550 }
3551 }
3552
3553 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3554 {
3555 int i, max_idx;
3556
3557 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3558
3559 for (i = 0; i < max_idx; i++) {
3560 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3561 netmem_ref netmem = rx_agg_buf->netmem;
3562
3563 if (!netmem)
3564 continue;
3565
3566 rx_agg_buf->netmem = 0;
3567 __clear_bit(i, rxr->rx_agg_bmap);
3568
3569 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3570 }
3571 }
3572
3573 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3574 struct bnxt_rx_ring_info *rxr)
3575 {
3576 int i;
3577
3578 for (i = 0; i < bp->max_tpa; i++) {
3579 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3580 u8 *data = tpa_info->data;
3581
3582 if (!data)
3583 continue;
3584
3585 tpa_info->data = NULL;
3586 page_pool_free_va(rxr->head_pool, data, false);
3587 }
3588 }
3589
3590 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3591 struct bnxt_rx_ring_info *rxr)
3592 {
3593 struct bnxt_tpa_idx_map *map;
3594
3595 if (!rxr->rx_tpa)
3596 goto skip_rx_tpa_free;
3597
3598 bnxt_free_one_tpa_info_data(bp, rxr);
3599
3600 skip_rx_tpa_free:
3601 if (!rxr->rx_buf_ring)
3602 goto skip_rx_buf_free;
3603
3604 bnxt_free_one_rx_ring(bp, rxr);
3605
3606 skip_rx_buf_free:
3607 if (!rxr->rx_agg_ring)
3608 goto skip_rx_agg_free;
3609
3610 bnxt_free_one_rx_agg_ring(bp, rxr);
3611
3612 skip_rx_agg_free:
3613 map = rxr->rx_tpa_idx_map;
3614 if (map)
3615 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3616 }
3617
3618 static void bnxt_free_rx_skbs(struct bnxt *bp)
3619 {
3620 int i;
3621
3622 if (!bp->rx_ring)
3623 return;
3624
3625 for (i = 0; i < bp->rx_nr_rings; i++)
3626 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3627 }
3628
3629 static void bnxt_free_skbs(struct bnxt *bp)
3630 {
3631 bnxt_free_tx_skbs(bp);
3632 bnxt_free_rx_skbs(bp);
3633 }
3634
3635 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3636 {
3637 u8 init_val = ctxm->init_value;
3638 u16 offset = ctxm->init_offset;
3639 u8 *p2 = p;
3640 int i;
3641
3642 if (!init_val)
3643 return;
3644 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3645 memset(p, init_val, len);
3646 return;
3647 }
3648 for (i = 0; i < len; i += ctxm->entry_size)
3649 *(p2 + i + offset) = init_val;
3650 }
3651
3652 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3653 void *buf, size_t offset, size_t head,
3654 size_t tail)
3655 {
3656 int i, head_page, start_idx, source_offset;
3657 size_t len, rem_len, total_len, max_bytes;
3658
3659 head_page = head / rmem->page_size;
3660 source_offset = head % rmem->page_size;
3661 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3662 if (!total_len)
3663 total_len = MAX_CTX_BYTES;
3664 start_idx = head_page % MAX_CTX_PAGES;
3665 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3666 source_offset;
3667 total_len = min(total_len, max_bytes);
3668 rem_len = total_len;
3669
3670 for (i = start_idx; rem_len; i++, source_offset = 0) {
3671 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3672 if (buf)
3673 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3674 len);
3675 offset += len;
3676 rem_len -= len;
3677 }
3678 return total_len;
3679 }
3680
3681 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3682 {
3683 struct pci_dev *pdev = bp->pdev;
3684 int i;
3685
3686 if (!rmem->pg_arr)
3687 goto skip_pages;
3688
3689 for (i = 0; i < rmem->nr_pages; i++) {
3690 if (!rmem->pg_arr[i])
3691 continue;
3692
3693 dma_free_coherent(&pdev->dev, rmem->page_size,
3694 rmem->pg_arr[i], rmem->dma_arr[i]);
3695
3696 rmem->pg_arr[i] = NULL;
3697 }
3698 skip_pages:
3699 if (rmem->pg_tbl) {
3700 size_t pg_tbl_size = rmem->nr_pages * 8;
3701
3702 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3703 pg_tbl_size = rmem->page_size;
3704 dma_free_coherent(&pdev->dev, pg_tbl_size,
3705 rmem->pg_tbl, rmem->pg_tbl_map);
3706 rmem->pg_tbl = NULL;
3707 }
3708 if (rmem->vmem_size && *rmem->vmem) {
3709 vfree(*rmem->vmem);
3710 *rmem->vmem = NULL;
3711 }
3712 }
3713
3714 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3715 {
3716 struct pci_dev *pdev = bp->pdev;
3717 u64 valid_bit = 0;
3718 int i;
3719
3720 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3721 valid_bit = PTU_PTE_VALID;
3722 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3723 size_t pg_tbl_size = rmem->nr_pages * 8;
3724
3725 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3726 pg_tbl_size = rmem->page_size;
3727 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3728 &rmem->pg_tbl_map,
3729 GFP_KERNEL);
3730 if (!rmem->pg_tbl)
3731 return -ENOMEM;
3732 }
3733
3734 for (i = 0; i < rmem->nr_pages; i++) {
3735 u64 extra_bits = valid_bit;
3736
3737 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3738 rmem->page_size,
3739 &rmem->dma_arr[i],
3740 GFP_KERNEL);
3741 if (!rmem->pg_arr[i])
3742 return -ENOMEM;
3743
3744 if (rmem->ctx_mem)
3745 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3746 rmem->page_size);
3747 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3748 if (i == rmem->nr_pages - 2 &&
3749 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3750 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3751 else if (i == rmem->nr_pages - 1 &&
3752 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3753 extra_bits |= PTU_PTE_LAST;
3754 rmem->pg_tbl[i] =
3755 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3756 }
3757 }
3758
3759 if (rmem->vmem_size) {
3760 *rmem->vmem = vzalloc(rmem->vmem_size);
3761 if (!(*rmem->vmem))
3762 return -ENOMEM;
3763 }
3764 return 0;
3765 }
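/* Descriptive note on the allocation above: rings spanning more than one
 * page (or using an extra indirection level, rmem->depth > 0) also get a
 * page table whose entries hold the DMA addresses of the data pages.  When
 * the ring uses PTEs (BNXT_RMEM_RING_PTE_FLAG), each entry carries
 * PTU_PTE_VALID, and the final two entries are additionally marked
 * PTU_PTE_NEXT_TO_LAST and PTU_PTE_LAST to flag the end of the ring.
 */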
3766
3767 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3768 struct bnxt_rx_ring_info *rxr)
3769 {
3770 int i;
3771
3772 kfree(rxr->rx_tpa_idx_map);
3773 rxr->rx_tpa_idx_map = NULL;
3774 if (rxr->rx_tpa) {
3775 for (i = 0; i < bp->max_tpa; i++) {
3776 kfree(rxr->rx_tpa[i].agg_arr);
3777 rxr->rx_tpa[i].agg_arr = NULL;
3778 }
3779 }
3780 kfree(rxr->rx_tpa);
3781 rxr->rx_tpa = NULL;
3782 }
3783
3784 static void bnxt_free_tpa_info(struct bnxt *bp)
3785 {
3786 int i;
3787
3788 for (i = 0; i < bp->rx_nr_rings; i++) {
3789 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3790
3791 bnxt_free_one_tpa_info(bp, rxr);
3792 }
3793 }
3794
3795 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3796 struct bnxt_rx_ring_info *rxr)
3797 {
3798 struct rx_agg_cmp *agg;
3799 int i;
3800
3801 rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3802 if (!rxr->rx_tpa)
3803 return -ENOMEM;
3804
3805 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3806 return 0;
3807 for (i = 0; i < bp->max_tpa; i++) {
3808 agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3809 if (!agg)
3810 return -ENOMEM;
3811 rxr->rx_tpa[i].agg_arr = agg;
3812 }
3813 rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3814 if (!rxr->rx_tpa_idx_map)
3815 return -ENOMEM;
3816
3817 return 0;
3818 }
3819
3820 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3821 {
3822 int i, rc;
3823
3824 bp->max_tpa = MAX_TPA;
3825 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3826 if (!bp->max_tpa_v2)
3827 return 0;
3828 bp->max_tpa = min_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3829 /* Older P5 firmware mistakenly reports a low max_tpa_v2, except in NPAR mode */
3830 if (bp->max_tpa <= 32 && BNXT_CHIP_P5(bp) && !BNXT_NPAR(bp))
3831 bp->max_tpa = MAX_TPA_P5;
3832 }
3833
3834 for (i = 0; i < bp->rx_nr_rings; i++) {
3835 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3836
3837 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3838 if (rc)
3839 return rc;
3840 }
3841 return 0;
3842 }
3843
3844 static void bnxt_free_rx_rings(struct bnxt *bp)
3845 {
3846 int i;
3847
3848 if (!bp->rx_ring)
3849 return;
3850
3851 bnxt_free_tpa_info(bp);
3852 for (i = 0; i < bp->rx_nr_rings; i++) {
3853 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3854 struct bnxt_ring_struct *ring;
3855
3856 if (rxr->xdp_prog)
3857 bpf_prog_put(rxr->xdp_prog);
3858
3859 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3860 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3861
3862 page_pool_destroy(rxr->page_pool);
3863 page_pool_destroy(rxr->head_pool);
3864 rxr->page_pool = rxr->head_pool = NULL;
3865
3866 kfree(rxr->rx_agg_bmap);
3867 rxr->rx_agg_bmap = NULL;
3868
3869 ring = &rxr->rx_ring_struct;
3870 bnxt_free_ring(bp, &ring->ring_mem);
3871
3872 ring = &rxr->rx_agg_ring_struct;
3873 bnxt_free_ring(bp, &ring->ring_mem);
3874 }
3875 }
3876
3877 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3878 struct bnxt_rx_ring_info *rxr)
3879 {
3880 /* The user may have chosen an rx_page_size larger than the default.
3881 * Ring sizes are kept uniform and the amount of memory consumed per
3882 * ring should be uniform as well, so cap how much of the ring we fill.
3883 */
3884 int fill_level = bp->rx_agg_ring_size;
3885
3886 if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3887 fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3888
3889 return fill_level;
3890 }
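/* Worked example for the cap above (illustrative numbers only): with
 * rx_agg_ring_size = 4096 entries and rx_page_size = 2 * BNXT_RX_PAGE_SIZE,
 * the fill level is halved to 2048 entries, so the aggregation ring still
 * consumes roughly the same number of bytes as it would with default-sized
 * pages.
 */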
3891
3892 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3893 struct bnxt_rx_ring_info *rxr,
3894 int numa_node)
3895 {
3896 unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3897 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3898 struct page_pool_params pp = { 0 };
3899 struct page_pool *pool;
3900
3901 pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3902 if (BNXT_RX_PAGE_MODE(bp))
3903 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3904
3905 pp.order = get_order(rxr->rx_page_size);
3906 pp.nid = numa_node;
3907 pp.netdev = bp->dev;
3908 pp.dev = &bp->pdev->dev;
3909 pp.dma_dir = bp->rx_dir;
3910 pp.max_len = PAGE_SIZE << pp.order;
3911 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3912 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3913 pp.queue_idx = rxr->bnapi->index;
3914
3915 pool = page_pool_create(&pp);
3916 if (IS_ERR(pool))
3917 return PTR_ERR(pool);
3918 rxr->page_pool = pool;
3919
3920 rxr->need_head_pool = page_pool_is_unreadable(pool);
3921 rxr->need_head_pool |= !!pp.order;
3922 if (bnxt_separate_head_pool(rxr)) {
3923 pp.order = 0;
3924 pp.max_len = PAGE_SIZE;
3925 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3926 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3927 pool = page_pool_create(&pp);
3928 if (IS_ERR(pool))
3929 goto err_destroy_pp;
3930 } else {
3931 page_pool_get(pool);
3932 }
3933 rxr->head_pool = pool;
3934
3935 return 0;
3936
3937 err_destroy_pp:
3938 page_pool_destroy(rxr->page_pool);
3939 rxr->page_pool = NULL;
3940 return PTR_ERR(pool);
3941 }
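/* Descriptive summary of the two pools created above: rxr->page_pool backs
 * the aggregation/payload buffers and may hand out high-order pages or
 * unreadable (device memory) netmem; rxr->head_pool backs the linear/header
 * buffers, which must remain readable order-0 memory.  When the main pool
 * already satisfies that, the head pool simply shares it via
 * page_pool_get(); otherwise a second, smaller order-0 pool is created.
 */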
3942
3943 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3944 {
3945 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3946 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3947 }
3948
3949 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3950 {
3951 u16 mem_size;
3952
3953 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3954 mem_size = rxr->rx_agg_bmap_size / 8;
3955 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3956 if (!rxr->rx_agg_bmap)
3957 return -ENOMEM;
3958
3959 return 0;
3960 }
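/* Sizing example for the bitmap above (illustrative): with
 * rx_agg_bmap_size = rx_agg_ring_mask + 1 = 4096 aggregation slots, the
 * bitmap occupies 4096 / 8 = 512 bytes of zeroed memory, one bit per slot.
 */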
3961
3962 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3963 {
3964 int numa_node = dev_to_node(&bp->pdev->dev);
3965 int i, rc = 0, agg_rings = 0, cpu;
3966
3967 if (!bp->rx_ring)
3968 return -ENOMEM;
3969
3970 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3971 agg_rings = 1;
3972
3973 for (i = 0; i < bp->rx_nr_rings; i++) {
3974 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3975 struct bnxt_ring_struct *ring;
3976 int cpu_node;
3977
3978 ring = &rxr->rx_ring_struct;
3979
3980 cpu = cpumask_local_spread(i, numa_node);
3981 cpu_node = cpu_to_node(cpu);
3982 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3983 i, cpu_node);
3984 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3985 if (rc)
3986 return rc;
3987 bnxt_enable_rx_page_pool(rxr);
3988
3989 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3990 if (rc < 0)
3991 return rc;
3992
3993 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3994 MEM_TYPE_PAGE_POOL,
3995 rxr->page_pool);
3996 if (rc) {
3997 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3998 return rc;
3999 }
4000
4001 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4002 if (rc)
4003 return rc;
4004
4005 ring->grp_idx = i;
4006 if (agg_rings) {
4007 ring = &rxr->rx_agg_ring_struct;
4008 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4009 if (rc)
4010 return rc;
4011
4012 ring->grp_idx = i;
4013 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
4014 if (rc)
4015 return rc;
4016 }
4017 }
4018 if (bp->flags & BNXT_FLAG_TPA)
4019 rc = bnxt_alloc_tpa_info(bp);
4020 return rc;
4021 }
4022
4023 static void bnxt_free_tx_inline_buf(struct bnxt_tx_ring_info *txr,
4024 struct pci_dev *pdev)
4025 {
4026 if (!txr->tx_inline_buf)
4027 return;
4028
4029 dma_unmap_single(&pdev->dev, txr->tx_inline_dma,
4030 txr->tx_inline_size, DMA_TO_DEVICE);
4031 kfree(txr->tx_inline_buf);
4032 txr->tx_inline_buf = NULL;
4033 txr->tx_inline_size = 0;
4034 }
4035
4036 static int bnxt_alloc_tx_inline_buf(struct bnxt_tx_ring_info *txr,
4037 struct pci_dev *pdev,
4038 unsigned int size)
4039 {
4040 txr->tx_inline_buf = kmalloc(size, GFP_KERNEL);
4041 if (!txr->tx_inline_buf)
4042 return -ENOMEM;
4043
4044 txr->tx_inline_dma = dma_map_single(&pdev->dev, txr->tx_inline_buf,
4045 size, DMA_TO_DEVICE);
4046 if (dma_mapping_error(&pdev->dev, txr->tx_inline_dma)) {
4047 kfree(txr->tx_inline_buf);
4048 txr->tx_inline_buf = NULL;
4049 return -ENOMEM;
4050 }
4051 txr->tx_inline_size = size;
4052
4053 return 0;
4054 }
4055
4056 static void bnxt_free_tx_rings(struct bnxt *bp)
4057 {
4058 int i;
4059 struct pci_dev *pdev = bp->pdev;
4060
4061 if (!bp->tx_ring)
4062 return;
4063
4064 for (i = 0; i < bp->tx_nr_rings; i++) {
4065 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4066 struct bnxt_ring_struct *ring;
4067
4068 if (txr->tx_push) {
4069 dma_free_coherent(&pdev->dev, bp->tx_push_size,
4070 txr->tx_push, txr->tx_push_mapping);
4071 txr->tx_push = NULL;
4072 }
4073
4074 bnxt_free_tx_inline_buf(txr, pdev);
4075
4076 ring = &txr->tx_ring_struct;
4077
4078 bnxt_free_ring(bp, &ring->ring_mem);
4079 }
4080 }
4081
4082 #define BNXT_TC_TO_RING_BASE(bp, tc) \
4083 ((tc) * (bp)->tx_nr_rings_per_tc)
4084
4085 #define BNXT_RING_TO_TC_OFF(bp, tx) \
4086 ((tx) % (bp)->tx_nr_rings_per_tc)
4087
4088 #define BNXT_RING_TO_TC(bp, tx) \
4089 ((tx) / (bp)->tx_nr_rings_per_tc)
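/* Mapping example for the macros above (illustrative): with
 * tx_nr_rings_per_tc = 4, TX ring 6 belongs to TC BNXT_RING_TO_TC() = 1 at
 * offset BNXT_RING_TO_TC_OFF() = 2 within that TC, and
 * BNXT_TC_TO_RING_BASE(bp, 1) = 4 is the first ring of TC 1.
 */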
4090
4091 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4092 {
4093 int i, j, rc;
4094 struct pci_dev *pdev = bp->pdev;
4095
4096 bp->tx_push_size = 0;
4097 if (bp->tx_push_thresh) {
4098 int push_size;
4099
4100 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4101 bp->tx_push_thresh);
4102
4103 if (push_size > 256) {
4104 push_size = 0;
4105 bp->tx_push_thresh = 0;
4106 }
4107
4108 bp->tx_push_size = push_size;
4109 }
4110
4111 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4112 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4113 struct bnxt_ring_struct *ring;
4114 u8 qidx;
4115
4116 ring = &txr->tx_ring_struct;
4117
4118 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4119 if (rc)
4120 return rc;
4121
4122 ring->grp_idx = txr->bnapi->index;
4123 if (bp->tx_push_size) {
4124 dma_addr_t mapping;
4125
4126 /* One pre-allocated DMA buffer to back up the
4127 * TX push operation
4128 */
4129 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4130 bp->tx_push_size,
4131 &txr->tx_push_mapping,
4132 GFP_KERNEL);
4133
4134 if (!txr->tx_push)
4135 return -ENOMEM;
4136
4137 mapping = txr->tx_push_mapping +
4138 sizeof(struct tx_push_bd);
4139 txr->data_mapping = cpu_to_le64(mapping);
4140 }
4141 if (!(bp->flags & BNXT_FLAG_UDP_GSO_CAP)) {
4142 rc = bnxt_alloc_tx_inline_buf(txr, pdev,
4143 BNXT_SW_USO_MAX_SEGS *
4144 TSO_HEADER_SIZE);
4145 if (rc)
4146 return rc;
4147 }
4148 qidx = bp->tc_to_qidx[j];
4149 ring->queue_id = bp->q_info[qidx].queue_id;
4150 spin_lock_init(&txr->xdp_tx_lock);
4151 if (i < bp->tx_nr_rings_xdp)
4152 continue;
4153 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4154 j++;
4155 }
4156 return 0;
4157 }
4158
4159 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4160 {
4161 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4162
4163 kfree(cpr->cp_desc_ring);
4164 cpr->cp_desc_ring = NULL;
4165 ring->ring_mem.pg_arr = NULL;
4166 kfree(cpr->cp_desc_mapping);
4167 cpr->cp_desc_mapping = NULL;
4168 ring->ring_mem.dma_arr = NULL;
4169 }
4170
4171 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4172 {
4173 cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4174 if (!cpr->cp_desc_ring)
4175 return -ENOMEM;
4176 cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4177 if (!cpr->cp_desc_mapping)
4178 return -ENOMEM;
4179 return 0;
4180 }
4181
4182 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4183 {
4184 int i;
4185
4186 if (!bp->bnapi)
4187 return;
4188 for (i = 0; i < bp->cp_nr_rings; i++) {
4189 struct bnxt_napi *bnapi = bp->bnapi[i];
4190
4191 if (!bnapi)
4192 continue;
4193 bnxt_free_cp_arrays(&bnapi->cp_ring);
4194 }
4195 }
4196
4197 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4198 {
4199 int i, n = bp->cp_nr_pages;
4200
4201 for (i = 0; i < bp->cp_nr_rings; i++) {
4202 struct bnxt_napi *bnapi = bp->bnapi[i];
4203 int rc;
4204
4205 if (!bnapi)
4206 continue;
4207 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4208 if (rc)
4209 return rc;
4210 }
4211 return 0;
4212 }
4213
4214 static void bnxt_free_cp_rings(struct bnxt *bp)
4215 {
4216 int i;
4217
4218 if (!bp->bnapi)
4219 return;
4220
4221 for (i = 0; i < bp->cp_nr_rings; i++) {
4222 struct bnxt_napi *bnapi = bp->bnapi[i];
4223 struct bnxt_cp_ring_info *cpr;
4224 struct bnxt_ring_struct *ring;
4225 int j;
4226
4227 if (!bnapi)
4228 continue;
4229
4230 cpr = &bnapi->cp_ring;
4231 ring = &cpr->cp_ring_struct;
4232
4233 bnxt_free_ring(bp, &ring->ring_mem);
4234
4235 if (!cpr->cp_ring_arr)
4236 continue;
4237
4238 for (j = 0; j < cpr->cp_ring_count; j++) {
4239 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4240
4241 ring = &cpr2->cp_ring_struct;
4242 bnxt_free_ring(bp, &ring->ring_mem);
4243 bnxt_free_cp_arrays(cpr2);
4244 }
4245 kfree(cpr->cp_ring_arr);
4246 cpr->cp_ring_arr = NULL;
4247 cpr->cp_ring_count = 0;
4248 }
4249 }
4250
4251 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4252 struct bnxt_cp_ring_info *cpr)
4253 {
4254 struct bnxt_ring_mem_info *rmem;
4255 struct bnxt_ring_struct *ring;
4256 int rc;
4257
4258 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4259 if (rc) {
4260 bnxt_free_cp_arrays(cpr);
4261 return -ENOMEM;
4262 }
4263 ring = &cpr->cp_ring_struct;
4264 rmem = &ring->ring_mem;
4265 rmem->nr_pages = bp->cp_nr_pages;
4266 rmem->page_size = HW_CMPD_RING_SIZE;
4267 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4268 rmem->dma_arr = cpr->cp_desc_mapping;
4269 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4270 rc = bnxt_alloc_ring(bp, rmem);
4271 if (rc) {
4272 bnxt_free_ring(bp, rmem);
4273 bnxt_free_cp_arrays(cpr);
4274 }
4275 return rc;
4276 }
4277
4278 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4279 {
4280 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4281 int i, j, rc, ulp_msix;
4282 int tcs = bp->num_tc;
4283
4284 if (!tcs)
4285 tcs = 1;
4286 ulp_msix = bnxt_get_ulp_msix_num(bp);
4287 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4288 struct bnxt_napi *bnapi = bp->bnapi[i];
4289 struct bnxt_cp_ring_info *cpr, *cpr2;
4290 struct bnxt_ring_struct *ring;
4291 int cp_count = 0, k;
4292 int rx = 0, tx = 0;
4293
4294 if (!bnapi)
4295 continue;
4296
4297 cpr = &bnapi->cp_ring;
4298 cpr->bnapi = bnapi;
4299 ring = &cpr->cp_ring_struct;
4300
4301 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4302 if (rc)
4303 return rc;
4304
4305 ring->map_idx = ulp_msix + i;
4306
4307 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4308 continue;
4309
4310 if (i < bp->rx_nr_rings) {
4311 cp_count++;
4312 rx = 1;
4313 }
4314 if (i < bp->tx_nr_rings_xdp) {
4315 cp_count++;
4316 tx = 1;
4317 } else if ((sh && i < bp->tx_nr_rings) ||
4318 (!sh && i >= bp->rx_nr_rings)) {
4319 cp_count += tcs;
4320 tx = 1;
4321 }
4322
4323 cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4324 if (!cpr->cp_ring_arr)
4325 return -ENOMEM;
4326 cpr->cp_ring_count = cp_count;
4327
4328 for (k = 0; k < cp_count; k++) {
4329 cpr2 = &cpr->cp_ring_arr[k];
4330 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4331 if (rc)
4332 return rc;
4333 cpr2->bnapi = bnapi;
4334 cpr2->sw_stats = cpr->sw_stats;
4335 cpr2->cp_idx = k;
4336 if (!k && rx) {
4337 bp->rx_ring[i].rx_cpr = cpr2;
4338 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4339 } else {
4340 int n, tc = k - rx;
4341
4342 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4343 bp->tx_ring[n].tx_cpr = cpr2;
4344 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4345 }
4346 }
4347 if (tx)
4348 j++;
4349 }
4350 return 0;
4351 }
4352
4353 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4354 struct bnxt_rx_ring_info *rxr)
4355 {
4356 struct bnxt_ring_mem_info *rmem;
4357 struct bnxt_ring_struct *ring;
4358
4359 ring = &rxr->rx_ring_struct;
4360 rmem = &ring->ring_mem;
4361 rmem->nr_pages = bp->rx_nr_pages;
4362 rmem->page_size = HW_RXBD_RING_SIZE;
4363 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4364 rmem->dma_arr = rxr->rx_desc_mapping;
4365 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4366 rmem->vmem = (void **)&rxr->rx_buf_ring;
4367
4368 ring = &rxr->rx_agg_ring_struct;
4369 rmem = &ring->ring_mem;
4370 rmem->nr_pages = bp->rx_agg_nr_pages;
4371 rmem->page_size = HW_RXBD_RING_SIZE;
4372 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4373 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4374 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4375 rmem->vmem = (void **)&rxr->rx_agg_ring;
4376 }
4377
4378 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4379 struct bnxt_rx_ring_info *rxr)
4380 {
4381 struct bnxt_ring_mem_info *rmem;
4382 struct bnxt_ring_struct *ring;
4383 int i;
4384
4385 rxr->page_pool->p.napi = NULL;
4386 rxr->page_pool = NULL;
4387 rxr->head_pool->p.napi = NULL;
4388 rxr->head_pool = NULL;
4389 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4390
4391 ring = &rxr->rx_ring_struct;
4392 rmem = &ring->ring_mem;
4393 rmem->pg_tbl = NULL;
4394 rmem->pg_tbl_map = 0;
4395 for (i = 0; i < rmem->nr_pages; i++) {
4396 rmem->pg_arr[i] = NULL;
4397 rmem->dma_arr[i] = 0;
4398 }
4399 *rmem->vmem = NULL;
4400
4401 ring = &rxr->rx_agg_ring_struct;
4402 rmem = &ring->ring_mem;
4403 rmem->pg_tbl = NULL;
4404 rmem->pg_tbl_map = 0;
4405 for (i = 0; i < rmem->nr_pages; i++) {
4406 rmem->pg_arr[i] = NULL;
4407 rmem->dma_arr[i] = 0;
4408 }
4409 *rmem->vmem = NULL;
4410 }
4411
4412 static void bnxt_init_ring_struct(struct bnxt *bp)
4413 {
4414 int i, j;
4415
4416 for (i = 0; i < bp->cp_nr_rings; i++) {
4417 struct bnxt_napi *bnapi = bp->bnapi[i];
4418 struct netdev_queue_config qcfg;
4419 struct bnxt_ring_mem_info *rmem;
4420 struct bnxt_cp_ring_info *cpr;
4421 struct bnxt_rx_ring_info *rxr;
4422 struct bnxt_tx_ring_info *txr;
4423 struct bnxt_ring_struct *ring;
4424
4425 if (!bnapi)
4426 continue;
4427
4428 cpr = &bnapi->cp_ring;
4429 ring = &cpr->cp_ring_struct;
4430 rmem = &ring->ring_mem;
4431 rmem->nr_pages = bp->cp_nr_pages;
4432 rmem->page_size = HW_CMPD_RING_SIZE;
4433 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4434 rmem->dma_arr = cpr->cp_desc_mapping;
4435 rmem->vmem_size = 0;
4436
4437 rxr = bnapi->rx_ring;
4438 if (!rxr)
4439 goto skip_rx;
4440
4441 netdev_queue_config(bp->dev, i, &qcfg);
4442 rxr->rx_page_size = qcfg.rx_page_size;
4443
4444 ring = &rxr->rx_ring_struct;
4445 rmem = &ring->ring_mem;
4446 rmem->nr_pages = bp->rx_nr_pages;
4447 rmem->page_size = HW_RXBD_RING_SIZE;
4448 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4449 rmem->dma_arr = rxr->rx_desc_mapping;
4450 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4451 rmem->vmem = (void **)&rxr->rx_buf_ring;
4452
4453 ring = &rxr->rx_agg_ring_struct;
4454 rmem = &ring->ring_mem;
4455 rmem->nr_pages = bp->rx_agg_nr_pages;
4456 rmem->page_size = HW_RXBD_RING_SIZE;
4457 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4458 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4459 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4460 rmem->vmem = (void **)&rxr->rx_agg_ring;
4461
4462 skip_rx:
4463 bnxt_for_each_napi_tx(j, bnapi, txr) {
4464 ring = &txr->tx_ring_struct;
4465 rmem = &ring->ring_mem;
4466 rmem->nr_pages = bp->tx_nr_pages;
4467 rmem->page_size = HW_TXBD_RING_SIZE;
4468 rmem->pg_arr = (void **)txr->tx_desc_ring;
4469 rmem->dma_arr = txr->tx_desc_mapping;
4470 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4471 rmem->vmem = (void **)&txr->tx_buf_ring;
4472 }
4473 }
4474 }
4475
4476 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4477 {
4478 int i;
4479 u32 prod;
4480 struct rx_bd **rx_buf_ring;
4481
4482 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4483 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4484 int j;
4485 struct rx_bd *rxbd;
4486
4487 rxbd = rx_buf_ring[i];
4488 if (!rxbd)
4489 continue;
4490
4491 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4492 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4493 rxbd->rx_bd_opaque = prod;
4494 }
4495 }
4496 }
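/* Descriptive note on the initialization above: every RX buffer descriptor
 * is stamped with the same length/flags/type word, and rx_bd_opaque is set
 * to the descriptor's absolute producer index, which lets completion
 * handling map a completion back to its software ring entry.
 */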
4497
4498 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4499 struct bnxt_rx_ring_info *rxr,
4500 int ring_nr)
4501 {
4502 u32 prod;
4503 int i;
4504
4505 prod = rxr->rx_prod;
4506 for (i = 0; i < bp->rx_ring_size; i++) {
4507 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4508 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4509 ring_nr, i, bp->rx_ring_size);
4510 break;
4511 }
4512 prod = NEXT_RX(prod);
4513 }
4514 rxr->rx_prod = prod;
4515 }
4516
4517 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4518 struct bnxt_rx_ring_info *rxr,
4519 int ring_nr)
4520 {
4521 int fill_level, i;
4522 u32 prod;
4523
4524 fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4525
4526 prod = rxr->rx_agg_prod;
4527 for (i = 0; i < fill_level; i++) {
4528 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4529 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4530 ring_nr, i, bp->rx_agg_ring_size);
4531 break;
4532 }
4533 prod = NEXT_RX_AGG(prod);
4534 }
4535 rxr->rx_agg_prod = prod;
4536 }
4537
4538 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4539 struct bnxt_rx_ring_info *rxr)
4540 {
4541 dma_addr_t mapping;
4542 u8 *data;
4543 int i;
4544
4545 for (i = 0; i < bp->max_tpa; i++) {
4546 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4547 GFP_KERNEL);
4548 if (!data)
4549 return -ENOMEM;
4550
4551 rxr->rx_tpa[i].data = data;
4552 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4553 rxr->rx_tpa[i].mapping = mapping;
4554 }
4555
4556 return 0;
4557 }
4558
4559 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4560 {
4561 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4562 int rc;
4563
4564 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4565
4566 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4567 return 0;
4568
4569 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4570
4571 if (rxr->rx_tpa) {
4572 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4573 if (rc)
4574 return rc;
4575 }
4576 return 0;
4577 }
4578
4579 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4580 struct bnxt_rx_ring_info *rxr)
4581 {
4582 struct bnxt_ring_struct *ring;
4583 u32 type;
4584
4585 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4586 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4587
4588 if (NET_IP_ALIGN == 2)
4589 type |= RX_BD_FLAGS_SOP;
4590
4591 ring = &rxr->rx_ring_struct;
4592 bnxt_init_rxbd_pages(ring, type);
4593 ring->fw_ring_id = INVALID_HW_RING_ID;
4594 }
4595
4596 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4597 struct bnxt_rx_ring_info *rxr)
4598 {
4599 struct bnxt_ring_struct *ring;
4600 u32 type;
4601
4602 ring = &rxr->rx_agg_ring_struct;
4603 ring->fw_ring_id = INVALID_HW_RING_ID;
4604 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4605 type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4606 RX_BD_TYPE_RX_AGG_BD;
4607
4608 /* On P7, setting EOP will cause the chip to disable
4609 * Relaxed Ordering (RO) for TPA data. Disable EOP for
4610 * potentially higher performance with RO.
4611 */
4612 if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4613 type |= RX_BD_FLAGS_AGG_EOP;
4614
4615 bnxt_init_rxbd_pages(ring, type);
4616 }
4617 }
4618
4619 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4620 {
4621 struct bnxt_rx_ring_info *rxr;
4622
4623 rxr = &bp->rx_ring[ring_nr];
4624 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4625
4626 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4627 &rxr->bnapi->napi);
4628
4629 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4630 bpf_prog_add(bp->xdp_prog, 1);
4631 rxr->xdp_prog = bp->xdp_prog;
4632 }
4633
4634 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4635
4636 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4637 }
4638
4639 static void bnxt_init_cp_rings(struct bnxt *bp)
4640 {
4641 int i, j;
4642
4643 for (i = 0; i < bp->cp_nr_rings; i++) {
4644 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4645 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4646
4647 ring->fw_ring_id = INVALID_HW_RING_ID;
4648 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4649 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4650 if (!cpr->cp_ring_arr)
4651 continue;
4652 for (j = 0; j < cpr->cp_ring_count; j++) {
4653 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4654
4655 ring = &cpr2->cp_ring_struct;
4656 ring->fw_ring_id = INVALID_HW_RING_ID;
4657 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4658 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4659 }
4660 }
4661 }
4662
4663 static int bnxt_init_rx_rings(struct bnxt *bp)
4664 {
4665 int i, rc = 0;
4666
4667 if (BNXT_RX_PAGE_MODE(bp)) {
4668 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4669 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4670 } else {
4671 bp->rx_offset = BNXT_RX_OFFSET;
4672 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4673 }
4674
4675 for (i = 0; i < bp->rx_nr_rings; i++) {
4676 rc = bnxt_init_one_rx_ring(bp, i);
4677 if (rc)
4678 break;
4679 }
4680
4681 return rc;
4682 }
4683
4684 static int bnxt_init_tx_rings(struct bnxt *bp)
4685 {
4686 netdev_features_t features;
4687 u16 i;
4688
4689 features = bp->dev->features;
4690
4691 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4692 bnxt_min_tx_desc_cnt(bp, features));
4693
4694 for (i = 0; i < bp->tx_nr_rings; i++) {
4695 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4696 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4697
4698 ring->fw_ring_id = INVALID_HW_RING_ID;
4699
4700 if (i >= bp->tx_nr_rings_xdp)
4701 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4702 NETDEV_QUEUE_TYPE_TX,
4703 &txr->bnapi->napi);
4704 }
4705
4706 return 0;
4707 }
4708
4709 static void bnxt_free_ring_grps(struct bnxt *bp)
4710 {
4711 kfree(bp->grp_info);
4712 bp->grp_info = NULL;
4713 }
4714
4715 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4716 {
4717 int i;
4718
4719 if (irq_re_init) {
4720 bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4721 bp->cp_nr_rings);
4722 if (!bp->grp_info)
4723 return -ENOMEM;
4724 }
4725 for (i = 0; i < bp->cp_nr_rings; i++) {
4726 if (irq_re_init)
4727 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4728 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4729 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4730 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4731 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4732 }
4733 return 0;
4734 }
4735
4736 static void bnxt_free_vnics(struct bnxt *bp)
4737 {
4738 kfree(bp->vnic_info);
4739 bp->vnic_info = NULL;
4740 bp->nr_vnics = 0;
4741 }
4742
4743 static int bnxt_alloc_vnics(struct bnxt *bp)
4744 {
4745 int num_vnics = 1;
4746
4747 #ifdef CONFIG_RFS_ACCEL
4748 if (bp->flags & BNXT_FLAG_RFS) {
4749 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4750 num_vnics++;
4751 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4752 num_vnics += bp->rx_nr_rings;
4753 }
4754 #endif
4755
4756 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4757 num_vnics++;
4758
4759 bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4760 if (!bp->vnic_info)
4761 return -ENOMEM;
4762
4763 bp->nr_vnics = num_vnics;
4764 return 0;
4765 }
4766
4767 static void bnxt_init_vnics(struct bnxt *bp)
4768 {
4769 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4770 int i;
4771
4772 for (i = 0; i < bp->nr_vnics; i++) {
4773 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4774 int j;
4775
4776 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4777 vnic->vnic_id = i;
4778 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4779 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4780
4781 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4782
4783 if (bp->vnic_info[i].rss_hash_key) {
4784 if (i == BNXT_VNIC_DEFAULT) {
4785 u8 *key = (void *)vnic->rss_hash_key;
4786 int k;
4787
4788 if (!bp->rss_hash_key_valid &&
4789 !bp->rss_hash_key_updated) {
4790 get_random_bytes(bp->rss_hash_key,
4791 HW_HASH_KEY_SIZE);
4792 bp->rss_hash_key_updated = true;
4793 }
4794
4795 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4796 HW_HASH_KEY_SIZE);
4797
4798 if (!bp->rss_hash_key_updated)
4799 continue;
4800
4801 bp->rss_hash_key_updated = false;
4802 bp->rss_hash_key_valid = true;
4803
4804 bp->toeplitz_prefix = 0;
4805 for (k = 0; k < 8; k++) {
4806 bp->toeplitz_prefix <<= 8;
4807 bp->toeplitz_prefix |= key[k];
4808 }
4809 } else {
4810 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4811 HW_HASH_KEY_SIZE);
4812 }
4813 }
4814 }
4815 }
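/* Illustrative note on the toeplitz_prefix computed above: the first eight
 * bytes of the RSS hash key are packed most-significant-byte first into a
 * u64 (a key starting 0x6d 0x5a ... yields a prefix starting 0x6d5a...),
 * and the cached prefix is only rebuilt when the hash key itself has been
 * updated.
 */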
4816
4817 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4818 {
4819 int pages;
4820
4821 pages = ring_size / desc_per_pg;
4822
4823 if (!pages)
4824 return 1;
4825
4826 pages++;
4827
4828 while (pages & (pages - 1))
4829 pages++;
4830
4831 return pages;
4832 }
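/* Worked example for the helper above (illustrative): ring_size = 1000 and
 * desc_per_pg = 64 give pages = 15, incremented to 16, which is already a
 * power of two, so 16 pages are returned.  ring_size = 1024 gives
 * pages = 16, incremented to 17 and then walked up to the next power of
 * two, 32; the page count therefore always leaves headroom beyond
 * ring_size / desc_per_pg.
 */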
4833
4834 void bnxt_set_tpa_flags(struct bnxt *bp)
4835 {
4836 bp->flags &= ~BNXT_FLAG_TPA;
4837 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4838 return;
4839 if (bp->dev->features & NETIF_F_LRO)
4840 bp->flags |= BNXT_FLAG_LRO;
4841 else if (bp->dev->features & NETIF_F_GRO_HW)
4842 bp->flags |= BNXT_FLAG_GRO;
4843 }
4844
4845 static void bnxt_init_ring_params(struct bnxt *bp)
4846 {
4847 unsigned int rx_size;
4848
4849 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4850 /* Try to fit 4 chunks into a 4k page */
4851 rx_size = SZ_1K -
4852 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4853 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4854 }
4855
4856 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4857 * be set on entry.
4858 */
4859 void bnxt_set_ring_params(struct bnxt *bp)
4860 {
4861 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4862 u32 agg_factor = 0, agg_ring_size = 0;
4863
4864 /* 8 for CRC and VLAN */
4865 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4866
4867 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4868 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4869
4870 ring_size = bp->rx_ring_size;
4871 bp->rx_agg_ring_size = 0;
4872 bp->rx_agg_nr_pages = 0;
4873
4874 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4875 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4876
4877 bp->flags &= ~BNXT_FLAG_JUMBO;
4878 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4879 u32 jumbo_factor;
4880
4881 bp->flags |= BNXT_FLAG_JUMBO;
4882 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4883 if (jumbo_factor > agg_factor)
4884 agg_factor = jumbo_factor;
4885 }
4886 if (agg_factor) {
4887 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4888 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4889 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4890 bp->rx_ring_size, ring_size);
4891 bp->rx_ring_size = ring_size;
4892 }
4893 agg_ring_size = ring_size * agg_factor;
4894
4895 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4896 RX_DESC_CNT);
4897 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4898 u32 tmp = agg_ring_size;
4899
4900 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4901 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4902 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4903 tmp, agg_ring_size);
4904 }
4905 bp->rx_agg_ring_size = agg_ring_size;
4906 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4907
4908 if (BNXT_RX_PAGE_MODE(bp)) {
4909 rx_space = PAGE_SIZE;
4910 rx_size = PAGE_SIZE -
4911 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4912 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4913 } else {
4914 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4915 bp->rx_copybreak,
4916 bp->dev->cfg_pending->hds_thresh);
4917 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4918 rx_space = rx_size + NET_SKB_PAD +
4919 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4920 }
4921 }
4922
4923 bp->rx_buf_use_size = rx_size;
4924 bp->rx_buf_size = rx_space;
4925
4926 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4927 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4928
4929 ring_size = bp->tx_ring_size;
4930 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4931 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4932
4933 max_rx_cmpl = bp->rx_ring_size;
4934 /* MAX TPA needs to be added because TPA_START completions are
4935 * immediately recycled, so the TPA completions are not bound by
4936 * the RX ring size.
4937 */
4938 if (bp->flags & BNXT_FLAG_TPA)
4939 max_rx_cmpl += bp->max_tpa;
4940 /* RX and TPA completions are 32-byte, all others are 16-byte */
4941 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4942 bp->cp_ring_size = ring_size;
4943
4944 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4945 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4946 bp->cp_nr_pages = MAX_CP_PAGES;
4947 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4948 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4949 ring_size, bp->cp_ring_size);
4950 }
4951 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4952 bp->cp_ring_mask = bp->cp_bit - 1;
4953 }
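/* Sizing example for the buffer math above (illustrative, assuming a
 * 1500-byte MTU, NET_IP_ALIGN = 2 and 64-byte cache lines): rx_size =
 * SKB_DATA_ALIGN(1500 + ETH_HLEN + 2 + 8) = 1536 bytes of packet data,
 * and rx_space adds the aligned headroom plus struct skb_shared_info, so
 * the whole buffer still fits in a single 4K page.  Larger MTUs push
 * rx_space past PAGE_SIZE, which sets BNXT_FLAG_JUMBO and moves the
 * payload onto aggregation buffers.
 */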
4954
4955 /* Changing allocation mode of RX rings.
4956 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4957 */
4958 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4959 {
4960 struct net_device *dev = bp->dev;
4961
4962 if (page_mode) {
4963 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4964 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4965
4966 if (bp->xdp_prog->aux->xdp_has_frags)
4967 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4968 else
4969 dev->max_mtu =
4970 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4971 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4972 bp->flags |= BNXT_FLAG_JUMBO;
4973 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4974 } else {
4975 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4976 bp->rx_skb_func = bnxt_rx_page_skb;
4977 }
4978 bp->rx_dir = DMA_BIDIRECTIONAL;
4979 } else {
4980 dev->max_mtu = bp->max_mtu;
4981 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4982 bp->rx_dir = DMA_FROM_DEVICE;
4983 bp->rx_skb_func = bnxt_rx_skb;
4984 }
4985 }
4986
4987 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4988 {
4989 __bnxt_set_rx_skb_mode(bp, page_mode);
4990
4991 if (!page_mode) {
4992 int rx, tx;
4993
4994 bnxt_get_max_rings(bp, &rx, &tx, true);
4995 if (rx > 1) {
4996 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4997 bp->dev->hw_features |= NETIF_F_LRO;
4998 }
4999 }
5000
5001 /* Update LRO and GRO_HW availability */
5002 netdev_update_features(bp->dev);
5003 }
5004
5005 static void bnxt_free_vnic_attributes(struct bnxt *bp)
5006 {
5007 int i;
5008 struct bnxt_vnic_info *vnic;
5009 struct pci_dev *pdev = bp->pdev;
5010
5011 if (!bp->vnic_info)
5012 return;
5013
5014 for (i = 0; i < bp->nr_vnics; i++) {
5015 vnic = &bp->vnic_info[i];
5016
5017 kfree(vnic->fw_grp_ids);
5018 vnic->fw_grp_ids = NULL;
5019
5020 kfree(vnic->uc_list);
5021 vnic->uc_list = NULL;
5022
5023 if (vnic->mc_list) {
5024 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
5025 vnic->mc_list, vnic->mc_list_mapping);
5026 vnic->mc_list = NULL;
5027 }
5028
5029 if (vnic->rss_table) {
5030 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
5031 vnic->rss_table,
5032 vnic->rss_table_dma_addr);
5033 vnic->rss_table = NULL;
5034 }
5035
5036 vnic->rss_hash_key = NULL;
5037 vnic->flags = 0;
5038 }
5039 }
5040
5041 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
5042 {
5043 int i, rc = 0, size;
5044 struct bnxt_vnic_info *vnic;
5045 struct pci_dev *pdev = bp->pdev;
5046 int max_rings;
5047
5048 for (i = 0; i < bp->nr_vnics; i++) {
5049 vnic = &bp->vnic_info[i];
5050
5051 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
5052 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
5053
5054 if (mem_size > 0) {
5055 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
5056 if (!vnic->uc_list) {
5057 rc = -ENOMEM;
5058 goto out;
5059 }
5060 }
5061 }
5062
5063 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
5064 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
5065 vnic->mc_list =
5066 dma_alloc_coherent(&pdev->dev,
5067 vnic->mc_list_size,
5068 &vnic->mc_list_mapping,
5069 GFP_KERNEL);
5070 if (!vnic->mc_list) {
5071 rc = -ENOMEM;
5072 goto out;
5073 }
5074 }
5075
5076 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5077 goto vnic_skip_grps;
5078
5079 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5080 max_rings = bp->rx_nr_rings;
5081 else
5082 max_rings = 1;
5083
5084 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
5085 if (!vnic->fw_grp_ids) {
5086 rc = -ENOMEM;
5087 goto out;
5088 }
5089 vnic_skip_grps:
5090 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
5091 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
5092 continue;
5093
5094 /* Allocate rss table and hash key */
5095 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5096 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5097 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5098
5099 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5100 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5101 vnic->rss_table_size,
5102 &vnic->rss_table_dma_addr,
5103 GFP_KERNEL);
5104 if (!vnic->rss_table) {
5105 rc = -ENOMEM;
5106 goto out;
5107 }
5108
5109 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5110 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5111 }
5112 return 0;
5113
5114 out:
5115 return rc;
5116 }
5117
5118 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5119 {
5120 struct bnxt_hwrm_wait_token *token;
5121
5122 dma_pool_destroy(bp->hwrm_dma_pool);
5123 bp->hwrm_dma_pool = NULL;
5124
5125 rcu_read_lock();
5126 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5127 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5128 rcu_read_unlock();
5129 }
5130
5131 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5132 {
5133 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5134 BNXT_HWRM_DMA_SIZE,
5135 BNXT_HWRM_DMA_ALIGN, 0);
5136 if (!bp->hwrm_dma_pool)
5137 return -ENOMEM;
5138
5139 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5140
5141 return 0;
5142 }
5143
5144 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5145 {
5146 kfree(stats->hw_masks);
5147 stats->hw_masks = NULL;
5148 kfree(stats->sw_stats);
5149 stats->sw_stats = NULL;
5150 if (stats->hw_stats) {
5151 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5152 stats->hw_stats_map);
5153 stats->hw_stats = NULL;
5154 }
5155 }
5156
5157 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5158 bool alloc_masks)
5159 {
5160 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5161 &stats->hw_stats_map, GFP_KERNEL);
5162 if (!stats->hw_stats)
5163 return -ENOMEM;
5164
5165 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5166 if (!stats->sw_stats)
5167 goto stats_mem_err;
5168
5169 if (alloc_masks) {
5170 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5171 if (!stats->hw_masks)
5172 goto stats_mem_err;
5173 }
5174 return 0;
5175
5176 stats_mem_err:
5177 bnxt_free_stats_mem(bp, stats);
5178 return -ENOMEM;
5179 }
5180
5181 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5182 {
5183 int i;
5184
5185 for (i = 0; i < count; i++)
5186 mask_arr[i] = mask;
5187 }
5188
5189 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5190 {
5191 int i;
5192
5193 for (i = 0; i < count; i++)
5194 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5195 }
5196
5197 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5198 struct bnxt_stats_mem *stats)
5199 {
5200 struct hwrm_func_qstats_ext_output *resp;
5201 struct hwrm_func_qstats_ext_input *req;
5202 __le64 *hw_masks;
5203 int rc;
5204
5205 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5206 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5207 return -EOPNOTSUPP;
5208
5209 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5210 if (rc)
5211 return rc;
5212
5213 req->fid = cpu_to_le16(0xffff);
5214 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5215
5216 resp = hwrm_req_hold(bp, req);
5217 rc = hwrm_req_send(bp, req);
5218 if (!rc) {
5219 hw_masks = &resp->rx_ucast_pkts;
5220 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5221 }
5222 hwrm_req_drop(bp, req);
5223 return rc;
5224 }
5225
5226 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5227 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5228
5229 static void bnxt_init_stats(struct bnxt *bp)
5230 {
5231 struct bnxt_napi *bnapi = bp->bnapi[0];
5232 struct bnxt_cp_ring_info *cpr;
5233 struct bnxt_stats_mem *stats;
5234 __le64 *rx_stats, *tx_stats;
5235 int rc, rx_count, tx_count;
5236 u64 *rx_masks, *tx_masks;
5237 u64 mask;
5238 u8 flags;
5239
5240 cpr = &bnapi->cp_ring;
5241 stats = &cpr->stats;
5242 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5243 if (rc) {
5244 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5245 mask = (1ULL << 48) - 1;
5246 else
5247 mask = -1ULL;
5248 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5249 }
5250 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5251 stats = &bp->port_stats;
5252 rx_stats = stats->hw_stats;
5253 rx_masks = stats->hw_masks;
5254 rx_count = sizeof(struct rx_port_stats) / 8;
5255 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5256 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5257 tx_count = sizeof(struct tx_port_stats) / 8;
5258
5259 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5260 rc = bnxt_hwrm_port_qstats(bp, flags);
5261 if (rc) {
5262 mask = (1ULL << 40) - 1;
5263
5264 bnxt_fill_masks(rx_masks, mask, rx_count);
5265 bnxt_fill_masks(tx_masks, mask, tx_count);
5266 } else {
5267 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5268 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5269 bnxt_hwrm_port_qstats(bp, 0);
5270 }
5271 }
5272 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5273 stats = &bp->rx_port_stats_ext;
5274 rx_stats = stats->hw_stats;
5275 rx_masks = stats->hw_masks;
5276 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5277 stats = &bp->tx_port_stats_ext;
5278 tx_stats = stats->hw_stats;
5279 tx_masks = stats->hw_masks;
5280 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5281
5282 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5283 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5284 if (rc) {
5285 mask = (1ULL << 40) - 1;
5286
5287 bnxt_fill_masks(rx_masks, mask, rx_count);
5288 if (tx_stats)
5289 bnxt_fill_masks(tx_masks, mask, tx_count);
5290 } else {
5291 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5292 if (tx_stats)
5293 bnxt_copy_hw_masks(tx_masks, tx_stats,
5294 tx_count);
5295 bnxt_hwrm_port_qstats_ext(bp, 0);
5296 }
5297 }
5298 }
5299
5300 static void bnxt_free_port_stats(struct bnxt *bp)
5301 {
5302 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5303 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5304
5305 bnxt_free_stats_mem(bp, &bp->port_stats);
5306 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5307 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5308 }
5309
5310 static void bnxt_free_ring_stats(struct bnxt *bp)
5311 {
5312 int i;
5313
5314 if (!bp->bnapi)
5315 return;
5316
5317 for (i = 0; i < bp->cp_nr_rings; i++) {
5318 struct bnxt_napi *bnapi = bp->bnapi[i];
5319 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5320
5321 bnxt_free_stats_mem(bp, &cpr->stats);
5322
5323 kfree(cpr->sw_stats);
5324 cpr->sw_stats = NULL;
5325 }
5326 }
5327
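/* Allocate software and hardware statistics memory for each completion
 * ring, plus (on PF devices) the port statistics and extended port
 * statistics buffers when the firmware supports them.
 */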
5328 static int bnxt_alloc_stats(struct bnxt *bp)
5329 {
5330 u32 size, i;
5331 int rc;
5332
5333 size = bp->hw_ring_stats_size;
5334
5335 for (i = 0; i < bp->cp_nr_rings; i++) {
5336 struct bnxt_napi *bnapi = bp->bnapi[i];
5337 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5338
5339 cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5340 if (!cpr->sw_stats)
5341 return -ENOMEM;
5342
5343 cpr->stats.len = size;
5344 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5345 if (rc)
5346 return rc;
5347
5348 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5349 }
5350
5351 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5352 return 0;
5353
5354 if (bp->port_stats.hw_stats)
5355 goto alloc_ext_stats;
5356
5357 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5358 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5359 if (rc)
5360 return rc;
5361
5362 bp->flags |= BNXT_FLAG_PORT_STATS;
5363
5364 alloc_ext_stats:
5365 /* Display extended statistics only if FW supports it */
5366 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5367 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5368 return 0;
5369
5370 if (bp->rx_port_stats_ext.hw_stats)
5371 goto alloc_tx_ext_stats;
5372
5373 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5374 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5375 /* Extended stats are optional */
5376 if (rc)
5377 return 0;
5378
5379 alloc_tx_ext_stats:
5380 if (bp->tx_port_stats_ext.hw_stats)
5381 return 0;
5382
5383 if (bp->hwrm_spec_code >= 0x10902 ||
5384 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5385 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5386 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5387 /* Extended stats are optional */
5388 if (rc)
5389 return 0;
5390 }
5391 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5392 return 0;
5393 }
5394
5395 static void bnxt_clear_ring_indices(struct bnxt *bp)
5396 {
5397 int i, j;
5398
5399 if (!bp->bnapi)
5400 return;
5401
5402 for (i = 0; i < bp->cp_nr_rings; i++) {
5403 struct bnxt_napi *bnapi = bp->bnapi[i];
5404 struct bnxt_cp_ring_info *cpr;
5405 struct bnxt_rx_ring_info *rxr;
5406 struct bnxt_tx_ring_info *txr;
5407
5408 if (!bnapi)
5409 continue;
5410
5411 cpr = &bnapi->cp_ring;
5412 cpr->cp_raw_cons = 0;
5413
5414 bnxt_for_each_napi_tx(j, bnapi, txr) {
5415 txr->tx_prod = 0;
5416 txr->tx_cons = 0;
5417 txr->tx_hw_cons = 0;
5418 }
5419
5420 rxr = bnapi->rx_ring;
5421 if (rxr) {
5422 rxr->rx_prod = 0;
5423 rxr->rx_agg_prod = 0;
5424 rxr->rx_sw_agg_prod = 0;
5425 rxr->rx_next_cons = 0;
5426 }
5427 bnapi->events = 0;
5428 }
5429 }
5430
5431 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5432 {
5433 u8 type = fltr->type, flags = fltr->flags;
5434
5435 INIT_LIST_HEAD(&fltr->list);
5436 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5437 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5438 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5439 }
5440
5441 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5442 {
5443 if (!list_empty(&fltr->list))
5444 list_del_init(&fltr->list);
5445 }
5446
5447 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5448 {
5449 struct bnxt_filter_base *usr_fltr, *tmp;
5450
5451 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5452 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5453 continue;
5454 bnxt_del_one_usr_fltr(bp, usr_fltr);
5455 }
5456 }
5457
5458 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5459 {
5460 hlist_del(&fltr->hash);
5461 bnxt_del_one_usr_fltr(bp, fltr);
5462 if (fltr->flags) {
5463 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5464 bp->ntp_fltr_count--;
5465 }
5466 kfree(fltr);
5467 }
5468
5469 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5470 {
5471 int i;
5472
5473 netdev_assert_locked_or_invisible(bp->dev);
5474
5475 /* We hold the netdev instance lock and all our NAPIs have been
5476 * disabled, so it is safe to delete the hash table.
5477 */
5478 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5479 struct hlist_head *head;
5480 struct hlist_node *tmp;
5481 struct bnxt_ntuple_filter *fltr;
5482
5483 head = &bp->ntp_fltr_hash_tbl[i];
5484 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5485 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5486 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5487 !list_empty(&fltr->base.list)))
5488 continue;
5489 bnxt_del_fltr(bp, &fltr->base);
5490 }
5491 }
5492 if (!all)
5493 return;
5494
5495 bitmap_free(bp->ntp_fltr_bmap);
5496 bp->ntp_fltr_bmap = NULL;
5497 bp->ntp_fltr_count = 0;
5498 }
5499
5500 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5501 {
5502 int i, rc = 0;
5503
5504 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5505 return 0;
5506
5507 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5508 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5509
5510 bp->ntp_fltr_count = 0;
5511 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5512
5513 if (!bp->ntp_fltr_bmap)
5514 rc = -ENOMEM;
5515
5516 return rc;
5517 }
5518
5519 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5520 {
5521 int i;
5522
5523 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5524 struct hlist_head *head;
5525 struct hlist_node *tmp;
5526 struct bnxt_l2_filter *fltr;
5527
5528 head = &bp->l2_fltr_hash_tbl[i];
5529 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5530 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5531 !list_empty(&fltr->base.list)))
5532 continue;
5533 bnxt_del_fltr(bp, &fltr->base);
5534 }
5535 }
5536 }
5537
5538 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5539 {
5540 int i;
5541
5542 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5543 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5544 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5545 }
5546
5547 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5548 {
5549 bnxt_free_vnic_attributes(bp);
5550 bnxt_free_tx_rings(bp);
5551 bnxt_free_rx_rings(bp);
5552 bnxt_free_cp_rings(bp);
5553 bnxt_free_all_cp_arrays(bp);
5554 bnxt_free_ntp_fltrs(bp, false);
5555 bnxt_free_l2_filters(bp, false);
5556 if (irq_re_init) {
5557 bnxt_free_ring_stats(bp);
5558 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5559 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5560 bnxt_free_port_stats(bp);
5561 bnxt_free_ring_grps(bp);
5562 bnxt_free_vnics(bp);
5563 kfree(bp->tx_ring_map);
5564 bp->tx_ring_map = NULL;
5565 kfree(bp->tx_ring);
5566 bp->tx_ring = NULL;
5567 kfree(bp->rx_ring);
5568 bp->rx_ring = NULL;
5569 kfree(bp->bnapi);
5570 bp->bnapi = NULL;
5571 } else {
5572 bnxt_clear_ring_indices(bp);
5573 }
5574 }
5575
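/* Allocate memory for the current ring configuration.  When irq_re_init is
 * true the bnapi array, rx/tx ring arrays, statistics, filter tables and
 * VNICs are (re)allocated as well; otherwise only the rings and VNIC
 * attributes are rebuilt.
 */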
5576 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5577 {
5578 int i, j, rc, size, arr_size;
5579 void *bnapi;
5580
5581 if (irq_re_init) {
5582 /* Allocate bnapi mem pointer array and mem block for
5583 * all queues
5584 */
5585 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5586 bp->cp_nr_rings);
5587 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5588 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5589 if (!bnapi)
5590 return -ENOMEM;
5591
5592 bp->bnapi = bnapi;
5593 bnapi += arr_size;
5594 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5595 bp->bnapi[i] = bnapi;
5596 bp->bnapi[i]->index = i;
5597 bp->bnapi[i]->bp = bp;
5598 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5599 struct bnxt_cp_ring_info *cpr =
5600 &bp->bnapi[i]->cp_ring;
5601
5602 cpr->cp_ring_struct.ring_mem.flags =
5603 BNXT_RMEM_RING_PTE_FLAG;
5604 }
5605 }
5606
5607 bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
5608 bp->rx_nr_rings);
5609 if (!bp->rx_ring)
5610 return -ENOMEM;
5611
5612 for (i = 0; i < bp->rx_nr_rings; i++) {
5613 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5614
5615 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5616 rxr->rx_ring_struct.ring_mem.flags =
5617 BNXT_RMEM_RING_PTE_FLAG;
5618 rxr->rx_agg_ring_struct.ring_mem.flags =
5619 BNXT_RMEM_RING_PTE_FLAG;
5620 } else {
5621 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5622 }
5623 rxr->bnapi = bp->bnapi[i];
5624 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5625 }
5626
5627 bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
5628 bp->tx_nr_rings);
5629 if (!bp->tx_ring)
5630 return -ENOMEM;
5631
5632 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5633 GFP_KERNEL);
5634
5635 if (!bp->tx_ring_map)
5636 return -ENOMEM;
5637
5638 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5639 j = 0;
5640 else
5641 j = bp->rx_nr_rings;
5642
5643 for (i = 0; i < bp->tx_nr_rings; i++) {
5644 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5645 struct bnxt_napi *bnapi2;
5646
5647 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5648 txr->tx_ring_struct.ring_mem.flags =
5649 BNXT_RMEM_RING_PTE_FLAG;
5650 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5651 if (i >= bp->tx_nr_rings_xdp) {
5652 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5653
5654 bnapi2 = bp->bnapi[k];
5655 txr->txq_index = i - bp->tx_nr_rings_xdp;
5656 txr->tx_napi_idx =
5657 BNXT_RING_TO_TC(bp, txr->txq_index);
5658 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5659 bnapi2->tx_int = bnxt_tx_int;
5660 } else {
5661 bnapi2 = bp->bnapi[j];
5662 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5663 bnapi2->tx_ring[0] = txr;
5664 bnapi2->tx_int = bnxt_tx_int_xdp;
5665 j++;
5666 }
5667 txr->bnapi = bnapi2;
5668 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5669 txr->tx_cpr = &bnapi2->cp_ring;
5670 }
5671
5672 rc = bnxt_alloc_stats(bp);
5673 if (rc)
5674 goto alloc_mem_err;
5675 bnxt_init_stats(bp);
5676
5677 rc = bnxt_alloc_ntp_fltrs(bp);
5678 if (rc)
5679 goto alloc_mem_err;
5680
5681 rc = bnxt_alloc_vnics(bp);
5682 if (rc)
5683 goto alloc_mem_err;
5684 }
5685
5686 rc = bnxt_alloc_all_cp_arrays(bp);
5687 if (rc)
5688 goto alloc_mem_err;
5689
5690 bnxt_init_ring_struct(bp);
5691
5692 rc = bnxt_alloc_rx_rings(bp);
5693 if (rc)
5694 goto alloc_mem_err;
5695
5696 rc = bnxt_alloc_tx_rings(bp);
5697 if (rc)
5698 goto alloc_mem_err;
5699
5700 rc = bnxt_alloc_cp_rings(bp);
5701 if (rc)
5702 goto alloc_mem_err;
5703
5704 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5705 BNXT_VNIC_MCAST_FLAG |
5706 BNXT_VNIC_UCAST_FLAG;
5707 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5708 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5709 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5710
5711 rc = bnxt_alloc_vnic_attributes(bp);
5712 if (rc)
5713 goto alloc_mem_err;
5714 return 0;
5715
5716 alloc_mem_err:
5717 bnxt_free_mem(bp, true);
5718 return rc;
5719 }
5720
5721 static void bnxt_disable_int(struct bnxt *bp)
5722 {
5723 int i;
5724
5725 if (!bp->bnapi)
5726 return;
5727
5728 for (i = 0; i < bp->cp_nr_rings; i++) {
5729 struct bnxt_napi *bnapi = bp->bnapi[i];
5730 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5731 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5732
5733 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5734 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5735 }
5736 }
5737
5738 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5739 {
5740 struct bnxt_napi *bnapi = bp->bnapi[n];
5741 struct bnxt_cp_ring_info *cpr;
5742
5743 cpr = &bnapi->cp_ring;
5744 return cpr->cp_ring_struct.map_idx;
5745 }
5746
5747 static void bnxt_disable_int_sync(struct bnxt *bp)
5748 {
5749 int i;
5750
5751 if (!bp->irq_tbl)
5752 return;
5753
5754 atomic_inc(&bp->intr_sem);
5755
5756 bnxt_disable_int(bp);
5757 for (i = 0; i < bp->cp_nr_rings; i++) {
5758 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5759
5760 synchronize_irq(bp->irq_tbl[map_idx].vector);
5761 }
5762 }
5763
5764 static void bnxt_enable_int(struct bnxt *bp)
5765 {
5766 int i;
5767
5768 atomic_set(&bp->intr_sem, 0);
5769 for (i = 0; i < bp->cp_nr_rings; i++) {
5770 struct bnxt_napi *bnapi = bp->bnapi[i];
5771 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5772
5773 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5774 }
5775 }
5776
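/* Register the driver with firmware: advertise the driver version, the
 * async events to be forwarded to this function, the VF commands the PF
 * will handle, and optional capabilities such as hot reset and error
 * recovery support.
 */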
5777 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5778 bool async_only)
5779 {
5780 DECLARE_BITMAP(async_events_bmap, 256);
5781 u32 *events = (u32 *)async_events_bmap;
5782 struct hwrm_func_drv_rgtr_output *resp;
5783 struct hwrm_func_drv_rgtr_input *req;
5784 u32 flags;
5785 int rc, i;
5786
5787 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5788 if (rc)
5789 return rc;
5790
5791 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5792 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5793 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5794
5795 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5796 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5797 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5798 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5799 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5800 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5801 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5802 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5803 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5804 req->flags = cpu_to_le32(flags);
5805 req->ver_maj_8b = DRV_VER_MAJ;
5806 req->ver_min_8b = DRV_VER_MIN;
5807 req->ver_upd_8b = DRV_VER_UPD;
5808 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5809 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5810 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5811
5812 if (BNXT_PF(bp)) {
5813 u32 data[8];
5814 int i;
5815
5816 memset(data, 0, sizeof(data));
5817 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5818 u16 cmd = bnxt_vf_req_snif[i];
5819 unsigned int bit, idx;
5820
5821 if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5822 cmd == HWRM_PORT_PHY_QCFG)
5823 continue;
5824
5825 idx = cmd / 32;
5826 bit = cmd % 32;
5827 data[idx] |= 1 << bit;
5828 }
5829
5830 for (i = 0; i < 8; i++)
5831 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5832
5833 req->enables |=
5834 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5835 }
5836
5837 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5838 req->flags |= cpu_to_le32(
5839 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5840
5841 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5842 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5843 u16 event_id = bnxt_async_events_arr[i];
5844
5845 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5846 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5847 continue;
5848 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5849 !bp->ptp_cfg)
5850 continue;
5851 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5852 }
5853 if (bmap && bmap_size) {
5854 for (i = 0; i < bmap_size; i++) {
5855 if (test_bit(i, bmap))
5856 __set_bit(i, async_events_bmap);
5857 }
5858 }
5859 for (i = 0; i < 8; i++)
5860 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5861
5862 if (async_only)
5863 req->enables =
5864 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5865
5866 resp = hwrm_req_hold(bp, req);
5867 rc = hwrm_req_send(bp, req);
5868 if (!rc) {
5869 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5870 if (resp->flags &
5871 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5872 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5873 }
5874 hwrm_req_drop(bp, req);
5875 return rc;
5876 }
5877
5878 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5879 {
5880 struct hwrm_func_drv_unrgtr_input *req;
5881 int rc;
5882
5883 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5884 return 0;
5885
5886 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5887 if (rc)
5888 return rc;
5889 return hwrm_req_send(bp, req);
5890 }
5891
5892 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5893
5894 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5895 {
5896 struct hwrm_tunnel_dst_port_free_input *req;
5897 int rc;
5898
5899 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5900 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5901 return 0;
5902 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5903 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5904 return 0;
5905
5906 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5907 if (rc)
5908 return rc;
5909
5910 req->tunnel_type = tunnel_type;
5911
5912 switch (tunnel_type) {
5913 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5914 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5915 bp->vxlan_port = 0;
5916 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5917 break;
5918 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5919 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5920 bp->nge_port = 0;
5921 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5922 break;
5923 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5924 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5925 bp->vxlan_gpe_port = 0;
5926 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5927 break;
5928 default:
5929 break;
5930 }
5931
5932 rc = hwrm_req_send(bp, req);
5933 if (rc)
5934 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5935 rc);
5936 if (bp->flags & BNXT_FLAG_TPA)
5937 bnxt_set_tpa(bp, true);
5938 return rc;
5939 }
5940
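/* Program a UDP tunnel destination port (VXLAN, GENEVE or VXLAN-GPE) in
 * firmware, cache the returned port id so it can be freed later, and
 * re-apply TPA settings if TPA is enabled.
 */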
5941 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5942 u8 tunnel_type)
5943 {
5944 struct hwrm_tunnel_dst_port_alloc_output *resp;
5945 struct hwrm_tunnel_dst_port_alloc_input *req;
5946 int rc;
5947
5948 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5949 if (rc)
5950 return rc;
5951
5952 req->tunnel_type = tunnel_type;
5953 req->tunnel_dst_port_val = port;
5954
5955 resp = hwrm_req_hold(bp, req);
5956 rc = hwrm_req_send(bp, req);
5957 if (rc) {
5958 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5959 rc);
5960 goto err_out;
5961 }
5962
5963 switch (tunnel_type) {
5964 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5965 bp->vxlan_port = port;
5966 bp->vxlan_fw_dst_port_id =
5967 le16_to_cpu(resp->tunnel_dst_port_id);
5968 break;
5969 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5970 bp->nge_port = port;
5971 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5972 break;
5973 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5974 bp->vxlan_gpe_port = port;
5975 bp->vxlan_gpe_fw_dst_port_id =
5976 le16_to_cpu(resp->tunnel_dst_port_id);
5977 break;
5978 default:
5979 break;
5980 }
5981 if (bp->flags & BNXT_FLAG_TPA)
5982 bnxt_set_tpa(bp, true);
5983
5984 err_out:
5985 hwrm_req_drop(bp, req);
5986 return rc;
5987 }
5988
5989 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5990 {
5991 struct hwrm_cfa_l2_set_rx_mask_input *req;
5992 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5993 int rc;
5994
5995 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5996 if (rc)
5997 return rc;
5998
5999 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6000 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
6001 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
6002 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
6003 }
6004 req->mask = cpu_to_le32(vnic->rx_mask);
6005 return hwrm_req_send_silent(bp, req);
6006 }
6007
6008 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6009 {
6010 if (!atomic_dec_and_test(&fltr->refcnt))
6011 return;
6012 spin_lock_bh(&bp->ntp_fltr_lock);
6013 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
6014 spin_unlock_bh(&bp->ntp_fltr_lock);
6015 return;
6016 }
6017 hlist_del_rcu(&fltr->base.hash);
6018 bnxt_del_one_usr_fltr(bp, &fltr->base);
6019 if (fltr->base.flags) {
6020 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
6021 bp->ntp_fltr_count--;
6022 }
6023 spin_unlock_bh(&bp->ntp_fltr_lock);
6024 kfree_rcu(fltr, base.rcu);
6025 }
6026
6027 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
6028 struct bnxt_l2_key *key,
6029 u32 idx)
6030 {
6031 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
6032 struct bnxt_l2_filter *fltr;
6033
6034 hlist_for_each_entry_rcu(fltr, head, base.hash) {
6035 struct bnxt_l2_key *l2_key = &fltr->l2_key;
6036
6037 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
6038 l2_key->vlan == key->vlan)
6039 return fltr;
6040 }
6041 return NULL;
6042 }
6043
6044 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
6045 struct bnxt_l2_key *key,
6046 u32 idx)
6047 {
6048 struct bnxt_l2_filter *fltr = NULL;
6049
6050 rcu_read_lock();
6051 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6052 if (fltr)
6053 atomic_inc(&fltr->refcnt);
6054 rcu_read_unlock();
6055 return fltr;
6056 }
6057
6058 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
6059 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
6060 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
6061 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
6062 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
6063
6064 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
6065 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
6066 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
6067 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
6068 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
6069
6070 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
6071 {
6072 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6073 if (BNXT_IPV4_4TUPLE(bp, fkeys))
6074 return sizeof(fkeys->addrs.v4addrs) +
6075 sizeof(fkeys->ports);
6076
6077 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
6078 return sizeof(fkeys->addrs.v4addrs);
6079 }
6080
6081 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
6082 if (BNXT_IPV6_4TUPLE(bp, fkeys))
6083 return sizeof(fkeys->addrs.v6addrs) +
6084 sizeof(fkeys->ports);
6085
6086 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
6087 return sizeof(fkeys->addrs.v6addrs);
6088 }
6089
6090 return 0;
6091 }
6092
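/* Compute the Toeplitz hash over the flow's 2-tuple or 4-tuple using the
 * provided RSS hash key; only the upper 32 bits of the running 64-bit
 * hash are valid.
 */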
6093 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
6094 const unsigned char *key)
6095 {
6096 u64 prefix = bp->toeplitz_prefix, hash = 0;
6097 struct bnxt_ipv4_tuple tuple4;
6098 struct bnxt_ipv6_tuple tuple6;
6099 int i, j, len = 0;
6100 u8 *four_tuple;
6101
6102 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6103 if (!len)
6104 return 0;
6105
6106 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6107 tuple4.v4addrs = fkeys->addrs.v4addrs;
6108 tuple4.ports = fkeys->ports;
6109 four_tuple = (unsigned char *)&tuple4;
6110 } else {
6111 tuple6.v6addrs = fkeys->addrs.v6addrs;
6112 tuple6.ports = fkeys->ports;
6113 four_tuple = (unsigned char *)&tuple6;
6114 }
6115
6116 for (i = 0, j = 8; i < len; i++, j++) {
6117 u8 byte = four_tuple[i];
6118 int bit;
6119
6120 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6121 if (byte & 0x80)
6122 hash ^= prefix;
6123 }
6124 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6125 }
6126
6127 /* The valid part of the hash is in the upper 32 bits. */
6128 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6129 }
6130
6131 #ifdef CONFIG_RFS_ACCEL
6132 static struct bnxt_l2_filter *
6133 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6134 {
6135 struct bnxt_l2_filter *fltr;
6136 u32 idx;
6137
6138 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6139 BNXT_L2_FLTR_HASH_MASK;
6140 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6141 return fltr;
6142 }
6143 #endif
6144
6145 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6146 struct bnxt_l2_key *key, u32 idx)
6147 {
6148 struct hlist_head *head;
6149
6150 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6151 fltr->l2_key.vlan = key->vlan;
6152 fltr->base.type = BNXT_FLTR_TYPE_L2;
6153 if (fltr->base.flags) {
6154 int bit_id;
6155
6156 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6157 bp->max_fltr, 0);
6158 if (bit_id < 0)
6159 return -ENOMEM;
6160 fltr->base.sw_id = (u16)bit_id;
6161 bp->ntp_fltr_count++;
6162 }
6163 head = &bp->l2_fltr_hash_tbl[idx];
6164 hlist_add_head_rcu(&fltr->base.hash, head);
6165 bnxt_insert_usr_fltr(bp, &fltr->base);
6166 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6167 atomic_set(&fltr->refcnt, 1);
6168 return 0;
6169 }
6170
6171 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6172 struct bnxt_l2_key *key,
6173 gfp_t gfp)
6174 {
6175 struct bnxt_l2_filter *fltr;
6176 u32 idx;
6177 int rc;
6178
6179 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6180 BNXT_L2_FLTR_HASH_MASK;
6181 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6182 if (fltr)
6183 return fltr;
6184
6185 fltr = kzalloc_obj(*fltr, gfp);
6186 if (!fltr)
6187 return ERR_PTR(-ENOMEM);
6188 spin_lock_bh(&bp->ntp_fltr_lock);
6189 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6190 spin_unlock_bh(&bp->ntp_fltr_lock);
6191 if (rc) {
6192 bnxt_del_l2_filter(bp, fltr);
6193 fltr = ERR_PTR(rc);
6194 }
6195 return fltr;
6196 }
6197
6198 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6199 struct bnxt_l2_key *key,
6200 u16 flags)
6201 {
6202 struct bnxt_l2_filter *fltr;
6203 u32 idx;
6204 int rc;
6205
6206 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6207 BNXT_L2_FLTR_HASH_MASK;
6208 spin_lock_bh(&bp->ntp_fltr_lock);
6209 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6210 if (fltr) {
6211 fltr = ERR_PTR(-EEXIST);
6212 goto l2_filter_exit;
6213 }
6214 fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
6215 if (!fltr) {
6216 fltr = ERR_PTR(-ENOMEM);
6217 goto l2_filter_exit;
6218 }
6219 fltr->base.flags = flags;
6220 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6221 if (rc) {
6222 spin_unlock_bh(&bp->ntp_fltr_lock);
6223 bnxt_del_l2_filter(bp, fltr);
6224 return ERR_PTR(rc);
6225 }
6226
6227 l2_filter_exit:
6228 spin_unlock_bh(&bp->ntp_fltr_lock);
6229 return fltr;
6230 }
6231
6232 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6233 {
6234 #ifdef CONFIG_BNXT_SRIOV
6235 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6236
6237 return vf->fw_fid;
6238 #else
6239 return INVALID_HW_RING_ID;
6240 #endif
6241 }
6242
6243 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6244 {
6245 struct hwrm_cfa_l2_filter_free_input *req;
6246 u16 target_id = 0xffff;
6247 int rc;
6248
6249 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6250 struct bnxt_pf_info *pf = &bp->pf;
6251
6252 if (fltr->base.vf_idx >= pf->active_vfs)
6253 return -EINVAL;
6254
6255 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6256 if (target_id == INVALID_HW_RING_ID)
6257 return -EINVAL;
6258 }
6259
6260 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6261 if (rc)
6262 return rc;
6263
6264 req->target_id = cpu_to_le16(target_id);
6265 req->l2_filter_id = fltr->base.filter_id;
6266 return hwrm_req_send(bp, req);
6267 }
6268
6269 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6270 {
6271 struct hwrm_cfa_l2_filter_alloc_output *resp;
6272 struct hwrm_cfa_l2_filter_alloc_input *req;
6273 u16 target_id = 0xffff;
6274 int rc;
6275
6276 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6277 struct bnxt_pf_info *pf = &bp->pf;
6278
6279 if (fltr->base.vf_idx >= pf->active_vfs)
6280 return -EINVAL;
6281
6282 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6283 }
6284 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6285 if (rc)
6286 return rc;
6287
6288 req->target_id = cpu_to_le16(target_id);
6289 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6290
6291 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6292 req->flags |=
6293 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6294 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6295 req->enables =
6296 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6297 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6298 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6299 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6300 eth_broadcast_addr(req->l2_addr_mask);
6301
6302 if (fltr->l2_key.vlan) {
6303 req->enables |=
6304 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6305 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6306 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6307 req->num_vlans = 1;
6308 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6309 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6310 }
6311
6312 resp = hwrm_req_hold(bp, req);
6313 rc = hwrm_req_send(bp, req);
6314 if (!rc) {
6315 fltr->base.filter_id = resp->l2_filter_id;
6316 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6317 }
6318 hwrm_req_drop(bp, req);
6319 return rc;
6320 }
6321
6322 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6323 struct bnxt_ntuple_filter *fltr)
6324 {
6325 struct hwrm_cfa_ntuple_filter_free_input *req;
6326 int rc;
6327
6328 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6329 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
6330 return 0;
6331
6332 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6333 if (rc)
6334 return rc;
6335
6336 req->ntuple_filter_id = fltr->base.filter_id;
6337 return hwrm_req_send(bp, req);
6338 }
6339
6340 #define BNXT_NTP_FLTR_FLAGS \
6341 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6342 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6343 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6344 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6345 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6346 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6347 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6348 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6349 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6350 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6351 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6352 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6353 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6354
6355 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6356 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6357
6358 void bnxt_fill_ipv6_mask(__be32 mask[4])
6359 {
6360 int i;
6361
6362 for (i = 0; i < 4; i++)
6363 mask[i] = cpu_to_be32(~0);
6364 }
6365
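/* Select the destination for an ntuple filter: the VNIC of an ethtool RSS
 * context, the dedicated ntuple VNIC with an RFS ring table index, or the
 * RX ring id directly on older devices.
 */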
6366 static void
6367 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6368 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6369 struct bnxt_ntuple_filter *fltr)
6370 {
6371 u16 rxq = fltr->base.rxq;
6372
6373 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6374 struct ethtool_rxfh_context *ctx;
6375 struct bnxt_rss_ctx *rss_ctx;
6376 struct bnxt_vnic_info *vnic;
6377
6378 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6379 fltr->base.fw_vnic_id);
6380 if (ctx) {
6381 rss_ctx = ethtool_rxfh_context_priv(ctx);
6382 vnic = &rss_ctx->vnic;
6383
6384 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6385 }
6386 return;
6387 }
6388 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6389 struct bnxt_vnic_info *vnic;
6390 u32 enables;
6391
6392 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6393 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6394 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6395 req->enables |= cpu_to_le32(enables);
6396 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6397 } else {
6398 u32 flags;
6399
6400 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6401 req->flags |= cpu_to_le32(flags);
6402 req->dst_id = cpu_to_le16(rxq);
6403 }
6404 }
6405
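/* Create an ntuple (flow) filter in hardware from the flow keys and masks:
 * fill in the parent L2 filter handle, IP addresses, ports and protocol,
 * then store the firmware filter id on success.
 */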
6406 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6407 struct bnxt_ntuple_filter *fltr)
6408 {
6409 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6410 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6411 struct bnxt_flow_masks *masks = &fltr->fmasks;
6412 struct flow_keys *keys = &fltr->fkeys;
6413 struct bnxt_l2_filter *l2_fltr;
6414 struct bnxt_vnic_info *vnic;
6415 int rc;
6416
6417 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6418 if (rc)
6419 return rc;
6420
6421 l2_fltr = fltr->l2_fltr;
6422 req->l2_filter_id = l2_fltr->base.filter_id;
6423
6424 if (fltr->base.flags & BNXT_ACT_DROP) {
6425 req->flags =
6426 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6427 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6428 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6429 } else {
6430 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6431 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6432 }
6433 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6434
6435 req->ethertype = htons(ETH_P_IP);
6436 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6437 req->ip_protocol = keys->basic.ip_proto;
6438
6439 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6440 req->ethertype = htons(ETH_P_IPV6);
6441 req->ip_addr_type =
6442 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6443 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6444 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6445 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6446 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6447 } else {
6448 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6449 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6450 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6451 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6452 }
6453 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6454 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6455 req->tunnel_type =
6456 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6457 }
6458
6459 req->src_port = keys->ports.src;
6460 req->src_port_mask = masks->ports.src;
6461 req->dst_port = keys->ports.dst;
6462 req->dst_port_mask = masks->ports.dst;
6463
6464 resp = hwrm_req_hold(bp, req);
6465 rc = hwrm_req_send(bp, req);
6466 if (!rc)
6467 fltr->base.filter_id = resp->ntuple_filter_id;
6468 hwrm_req_drop(bp, req);
6469 return rc;
6470 }
6471
6472 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6473 const u8 *mac_addr)
6474 {
6475 struct bnxt_l2_filter *fltr;
6476 struct bnxt_l2_key key;
6477 int rc;
6478
6479 ether_addr_copy(key.dst_mac_addr, mac_addr);
6480 key.vlan = 0;
6481 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6482 if (IS_ERR(fltr))
6483 return PTR_ERR(fltr);
6484
6485 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6486 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6487 if (rc)
6488 bnxt_del_l2_filter(bp, fltr);
6489 else
6490 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6491 return rc;
6492 }
6493
6494 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6495 {
6496 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6497
6498 /* Any associated ntuple filters will also be cleared by firmware. */
6499 for (i = 0; i < num_of_vnics; i++) {
6500 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6501
6502 for (j = 0; j < vnic->uc_filter_count; j++) {
6503 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6504
6505 bnxt_hwrm_l2_filter_free(bp, fltr);
6506 bnxt_del_l2_filter(bp, fltr);
6507 }
6508 vnic->uc_filter_count = 0;
6509 }
6510 }
6511
6512 #define BNXT_DFLT_TUNL_TPA_BMAP \
6513 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6514 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6515 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6516
6517 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6518 struct hwrm_vnic_tpa_cfg_input *req)
6519 {
6520 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6521
6522 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6523 return;
6524
6525 if (bp->vxlan_port)
6526 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6527 if (bp->vxlan_gpe_port)
6528 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6529 if (bp->nge_port)
6530 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6531
6532 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6533 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6534 }
6535
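/* Enable or disable TPA (hardware aggregation) on a VNIC.  When enabling,
 * the maximum aggregation segment count is derived from the MTU and RX
 * page size on older chips; P5+ chips use MAX_TPA_SEGS_P5 and the driver's
 * cached max_tpa value instead.
 */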
6536 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6537 u32 tpa_flags)
6538 {
6539 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6540 struct hwrm_vnic_tpa_cfg_input *req;
6541 int rc;
6542
6543 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6544 return 0;
6545
6546 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6547 if (rc)
6548 return rc;
6549
6550 if (tpa_flags) {
6551 u16 mss = bp->dev->mtu - 40;
6552 u32 nsegs, n, segs = 0, flags;
6553
6554 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6555 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6556 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6557 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6558 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6559 if (tpa_flags & BNXT_FLAG_GRO)
6560 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6561
6562 req->flags = cpu_to_le32(flags);
6563
6564 req->enables =
6565 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6566 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6567 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6568
6569 /* The number of segs is in log2 units, and the first packet is
6570 * not included as part of these units.
6571 */
6572 if (mss <= BNXT_RX_PAGE_SIZE) {
6573 n = BNXT_RX_PAGE_SIZE / mss;
6574 nsegs = (MAX_SKB_FRAGS - 1) * n;
6575 } else {
6576 n = mss / BNXT_RX_PAGE_SIZE;
6577 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6578 n++;
6579 nsegs = (MAX_SKB_FRAGS - n) / n;
6580 }
6581
6582 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6583 segs = MAX_TPA_SEGS_P5;
6584 max_aggs = bp->max_tpa;
6585 } else {
6586 segs = ilog2(nsegs);
6587 }
6588 req->max_agg_segs = cpu_to_le16(segs);
6589 req->max_aggs = cpu_to_le16(max_aggs);
6590
6591 req->min_agg_len = cpu_to_le32(512);
6592 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6593 }
6594 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6595
6596 return hwrm_req_send(bp, req);
6597 }
6598
6599 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6600 {
6601 struct bnxt_ring_grp_info *grp_info;
6602
6603 grp_info = &bp->grp_info[ring->grp_idx];
6604 return grp_info->cp_fw_ring_id;
6605 }
6606
6607 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6608 {
6609 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6610 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6611 else
6612 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6613 }
6614
6615 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6616 {
6617 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6618 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6619 else
6620 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6621 }
6622
6623 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6624 {
6625 int entries;
6626
6627 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6628 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6629 else
6630 entries = HW_HASH_INDEX_SIZE;
6631
6632 bp->rss_indir_tbl_entries = entries;
6633 bp->rss_indir_tbl =
6634 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6635 if (!bp->rss_indir_tbl)
6636 return -ENOMEM;
6637
6638 return 0;
6639 }
6640
6641 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6642 struct ethtool_rxfh_context *rss_ctx)
6643 {
6644 u16 max_rings, max_entries, pad, i;
6645 u32 *rss_indir_tbl;
6646
6647 if (!bp->rx_nr_rings)
6648 return;
6649
6650 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6651 max_rings = bp->rx_nr_rings - 1;
6652 else
6653 max_rings = bp->rx_nr_rings;
6654
6655 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6656 if (rss_ctx)
6657 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6658 else
6659 rss_indir_tbl = &bp->rss_indir_tbl[0];
6660
6661 for (i = 0; i < max_entries; i++)
6662 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6663
6664 pad = bp->rss_indir_tbl_entries - max_entries;
6665 if (pad)
6666 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6667 }
6668
6669 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6670 {
6671 u32 i, tbl_size, max_ring = 0;
6672
6673 if (!bp->rss_indir_tbl)
6674 return 0;
6675
6676 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6677 for (i = 0; i < tbl_size; i++)
6678 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6679 return max_ring;
6680 }
6681
6682 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6683 {
6684 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6685 if (!rx_rings)
6686 return 0;
6687 if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6688 return BNXT_RSS_TABLE_MAX_TBL_P5;
6689
6690 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6691 BNXT_RSS_TABLE_ENTRIES_P5);
6692 }
6693 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6694 return 2;
6695 return 1;
6696 }
6697
6698 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6699 {
6700 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6701 u16 i, j;
6702
6703 /* Fill the RSS indirection table with ring group ids */
6704 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6705 if (!no_rss)
6706 j = bp->rss_indir_tbl[i];
6707 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6708 }
6709 }
6710
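/* Fill the P5+ RSS ring table: each indirection table entry is expanded
 * into an (RX ring id, completion ring id) pair for the selected ring.
 */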
6711 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6712 struct bnxt_vnic_info *vnic)
6713 {
6714 __le16 *ring_tbl = vnic->rss_table;
6715 struct bnxt_rx_ring_info *rxr;
6716 u16 tbl_size, i;
6717
6718 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6719
6720 for (i = 0; i < tbl_size; i++) {
6721 u16 ring_id, j;
6722
6723 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6724 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6725 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6726 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6727 else
6728 j = bp->rss_indir_tbl[i];
6729 rxr = &bp->rx_ring[j];
6730
6731 ring_id = rxr->rx_ring_struct.fw_ring_id;
6732 *ring_tbl++ = cpu_to_le16(ring_id);
6733 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6734 *ring_tbl++ = cpu_to_le16(ring_id);
6735 }
6736 }
6737
6738 static void
6739 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6740 struct bnxt_vnic_info *vnic)
6741 {
6742 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6743 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6744 if (bp->flags & BNXT_FLAG_CHIP_P7)
6745 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6746 } else {
6747 bnxt_fill_hw_rss_tbl(bp, vnic);
6748 }
6749
6750 if (bp->rss_hash_delta) {
6751 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6752 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6753 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6754 else
6755 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6756 } else {
6757 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6758 }
6759 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6760 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6761 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6762 }
6763
6764 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6765 bool set_rss)
6766 {
6767 struct hwrm_vnic_rss_cfg_input *req;
6768 int rc;
6769
6770 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6771 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6772 return 0;
6773
6774 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6775 if (rc)
6776 return rc;
6777
6778 if (set_rss)
6779 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6780 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6781 return hwrm_req_send(bp, req);
6782 }
6783
6784 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6785 struct bnxt_vnic_info *vnic, bool set_rss)
6786 {
6787 struct hwrm_vnic_rss_cfg_input *req;
6788 dma_addr_t ring_tbl_map;
6789 u32 i, nr_ctxs;
6790 int rc;
6791
6792 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6793 if (rc)
6794 return rc;
6795
6796 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6797 if (!set_rss)
6798 return hwrm_req_send(bp, req);
6799
6800 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6801 ring_tbl_map = vnic->rss_table_dma_addr;
6802 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6803
6804 hwrm_req_hold(bp, req);
6805 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6806 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6807 req->ring_table_pair_index = i;
6808 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6809 rc = hwrm_req_send(bp, req);
6810 if (rc)
6811 goto exit;
6812 }
6813
6814 exit:
6815 hwrm_req_drop(bp, req);
6816 return rc;
6817 }
6818
6819 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6820 {
6821 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6822 struct hwrm_vnic_rss_qcfg_output *resp;
6823 struct hwrm_vnic_rss_qcfg_input *req;
6824
6825 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6826 return;
6827
6828 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6829 /* All contexts are configured to the same hash_type; context zero always exists */
6830 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6831 resp = hwrm_req_hold(bp, req);
6832 if (!hwrm_req_send(bp, req)) {
6833 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6834 bp->rss_hash_delta = 0;
6835 }
6836 hwrm_req_drop(bp, req);
6837 }
6838
6839 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6840 {
6841 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6842 struct hwrm_vnic_plcmodes_cfg_input *req;
6843 int rc;
6844
6845 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6846 if (rc)
6847 return rc;
6848
6849 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6850 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6851 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6852
6853 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6854 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6855 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6856 req->enables |=
6857 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6858 req->hds_threshold = cpu_to_le16(hds_thresh);
6859 }
6860 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6861 return hwrm_req_send(bp, req);
6862 }
6863
6864 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6865 struct bnxt_vnic_info *vnic,
6866 u16 ctx_idx)
6867 {
6868 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6869
6870 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6871 return;
6872
6873 req->rss_cos_lb_ctx_id =
6874 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6875
6876 hwrm_req_send(bp, req);
6877 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6878 }
6879
6880 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6881 {
6882 int i, j;
6883
6884 for (i = 0; i < bp->nr_vnics; i++) {
6885 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6886
6887 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6888 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6889 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6890 }
6891 }
6892 bp->rsscos_nr_ctxs = 0;
6893 }
6894
6895 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6896 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6897 {
6898 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6899 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6900 int rc;
6901
6902 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6903 if (rc)
6904 return rc;
6905
6906 resp = hwrm_req_hold(bp, req);
6907 rc = hwrm_req_send(bp, req);
6908 if (!rc)
6909 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6910 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6911 hwrm_req_drop(bp, req);
6912
6913 return rc;
6914 }
6915
6916 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6917 {
6918 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6919 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6920 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6921 }
6922
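/* Configure a VNIC in firmware: the default RX/completion ring on P5+
 * chips, or the default ring group plus RSS/COS/LB rules on older chips,
 * along with the MRU, VLAN stripping and the RoCE mode for the default
 * VNIC.
 */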
6923 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6924 {
6925 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6926 struct hwrm_vnic_cfg_input *req;
6927 unsigned int ring = 0, grp_idx;
6928 u16 def_vlan = 0;
6929 int rc;
6930
6931 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6932 if (rc)
6933 return rc;
6934
6935 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6936 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6937
6938 req->default_rx_ring_id =
6939 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6940 req->default_cmpl_ring_id =
6941 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6942 req->enables =
6943 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6944 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6945 goto vnic_mru;
6946 }
6947 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6948 /* Only RSS is supported for now; TBD: COS & LB */
6949 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6950 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6951 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6952 VNIC_CFG_REQ_ENABLES_MRU);
6953 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6954 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6955 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6956 VNIC_CFG_REQ_ENABLES_MRU);
6957 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6958 } else {
6959 req->rss_rule = cpu_to_le16(0xffff);
6960 }
6961
6962 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6963 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6964 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6965 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6966 } else {
6967 req->cos_rule = cpu_to_le16(0xffff);
6968 }
6969
6970 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6971 ring = 0;
6972 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6973 ring = vnic->vnic_id - 1;
6974 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6975 ring = bp->rx_nr_rings - 1;
6976
6977 grp_idx = bp->rx_ring[ring].bnapi->index;
6978 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6979 req->lb_rule = cpu_to_le16(0xffff);
6980 vnic_mru:
6981 vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6982 req->mru = cpu_to_le16(vnic->mru);
6983
6984 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6985 #ifdef CONFIG_BNXT_SRIOV
6986 if (BNXT_VF(bp))
6987 def_vlan = bp->vf.vlan;
6988 #endif
6989 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6990 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6991 if (vnic->vnic_id == BNXT_VNIC_DEFAULT &&
6992 bnxt_ulp_registered(bp->edev[BNXT_AUXDEV_RDMA]))
6993 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6994
6995 return hwrm_req_send(bp, req);
6996 }
6997
6998 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6999 struct bnxt_vnic_info *vnic)
7000 {
7001 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
7002 struct hwrm_vnic_free_input *req;
7003
7004 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
7005 return;
7006
7007 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
7008
7009 hwrm_req_send(bp, req);
7010 vnic->fw_vnic_id = INVALID_HW_RING_ID;
7011 }
7012 }
7013
7014 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
7015 {
7016 u16 i;
7017
7018 for (i = 0; i < bp->nr_vnics; i++)
7019 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
7020 }
7021
7022 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
7023 unsigned int start_rx_ring_idx,
7024 unsigned int nr_rings)
7025 {
7026 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
7027 struct hwrm_vnic_alloc_output *resp;
7028 struct hwrm_vnic_alloc_input *req;
7029 int rc;
7030
7031 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
7032 if (rc)
7033 return rc;
7034
7035 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7036 goto vnic_no_ring_grps;
7037
7038 /* map ring groups to this vnic */
7039 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
7040 grp_idx = bp->rx_ring[i].bnapi->index;
7041 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
7042 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
7043 j, nr_rings);
7044 break;
7045 }
7046 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
7047 }
7048
7049 vnic_no_ring_grps:
7050 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
7051 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
7052 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
7053 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
7054
7055 resp = hwrm_req_hold(bp, req);
7056 rc = hwrm_req_send(bp, req);
7057 if (!rc)
7058 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
7059 hwrm_req_drop(bp, req);
7060 return rc;
7061 }
7062
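/* Query VNIC capabilities from firmware and translate them into driver
 * rss_cap and fw_cap flags; also picks up the TPA aggregation limit and
 * the per-ring hardware statistics size.
 */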
7063 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
7064 {
7065 struct hwrm_vnic_qcaps_output *resp;
7066 struct hwrm_vnic_qcaps_input *req;
7067 int rc;
7068
7069 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
7070 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
7071 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
7072 if (bp->hwrm_spec_code < 0x10600)
7073 return 0;
7074
7075 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
7076 if (rc)
7077 return rc;
7078
7079 resp = hwrm_req_hold(bp, req);
7080 rc = hwrm_req_send(bp, req);
7081 if (!rc) {
7082 u32 flags = le32_to_cpu(resp->flags);
7083
7084 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
7085 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
7086 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
7087 if (flags &
7088 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
7089 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
7090
7091 /* Older P5 fw before EXT_HW_STATS support did not set
7092 * VLAN_STRIP_CAP properly.
7093 */
7094 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7095 (BNXT_CHIP_P5(bp) &&
7096 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7097 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7098 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7099 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7100 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7101 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7102 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7103 if (bp->max_tpa_v2) {
7104 if (BNXT_CHIP_P5(bp))
7105 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7106 else
7107 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7108 }
7109 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7110 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7111 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7112 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7113 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7114 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7115 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7116 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7117 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7118 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7119 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7120 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7121 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7122 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7123 }
7124 hwrm_req_drop(bp, req);
7125 return rc;
7126 }
7127
7128 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7129 {
7130 struct hwrm_ring_grp_alloc_output *resp;
7131 struct hwrm_ring_grp_alloc_input *req;
7132 int rc;
7133 u16 i;
7134
7135 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7136 return 0;
7137
7138 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7139 if (rc)
7140 return rc;
7141
7142 resp = hwrm_req_hold(bp, req);
7143 for (i = 0; i < bp->rx_nr_rings; i++) {
7144 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7145
7146 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7147 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7148 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7149 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7150
7151 rc = hwrm_req_send(bp, req);
7152
7153 if (rc)
7154 break;
7155
7156 bp->grp_info[grp_idx].fw_grp_id =
7157 le32_to_cpu(resp->ring_group_id);
7158 }
7159 hwrm_req_drop(bp, req);
7160 return rc;
7161 }
7162
7163 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7164 {
7165 struct hwrm_ring_grp_free_input *req;
7166 u16 i;
7167
7168 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7169 return;
7170
7171 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7172 return;
7173
7174 hwrm_req_hold(bp, req);
7175 for (i = 0; i < bp->cp_nr_rings; i++) {
7176 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7177 continue;
7178 req->ring_group_id =
7179 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7180
7181 hwrm_req_send(bp, req);
7182 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7183 }
7184 hwrm_req_drop(bp, req);
7185 }
7186
7187 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7188 struct hwrm_ring_alloc_input *req,
7189 struct bnxt_rx_ring_info *rxr,
7190 struct bnxt_ring_struct *ring)
7191 {
7192 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7193 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7194 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7195
7196 if (ring_type == HWRM_RING_ALLOC_AGG) {
7197 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7198 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7199 req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7200 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7201 } else {
7202 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7203 if (NET_IP_ALIGN == 2)
7204 req->flags =
7205 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7206 }
7207 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7208 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7209 req->enables |= cpu_to_le32(enables);
7210 }
7211
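/* Build and send a HWRM_RING_ALLOC request for one TX, RX, AGG,
 * completion or NQ ring and save the returned firmware ring ID in
 * ring->fw_ring_id.
 */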
7212 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7213 struct bnxt_rx_ring_info *rxr,
7214 struct bnxt_ring_struct *ring,
7215 u32 ring_type, u32 map_index)
7216 {
7217 struct hwrm_ring_alloc_output *resp;
7218 struct hwrm_ring_alloc_input *req;
7219 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7220 struct bnxt_ring_grp_info *grp_info;
7221 int rc, err = 0;
7222 u16 ring_id;
7223
7224 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7225 if (rc)
7226 goto exit;
7227
7228 req->enables = 0;
7229 if (rmem->nr_pages > 1) {
7230 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7231 /* Page size is in log2 units */
7232 req->page_size = BNXT_PAGE_SHIFT;
7233 req->page_tbl_depth = 1;
7234 } else {
7235 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7236 }
7237 req->fbo = 0;
7238 /* Association of ring index with doorbell index and MSIX number */
7239 req->logical_id = cpu_to_le16(map_index);
7240
7241 switch (ring_type) {
7242 case HWRM_RING_ALLOC_TX: {
7243 struct bnxt_tx_ring_info *txr;
7244 u16 flags = 0;
7245
7246 txr = container_of(ring, struct bnxt_tx_ring_info,
7247 tx_ring_struct);
7248 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7249 /* Association of transmit ring with completion ring */
7250 grp_info = &bp->grp_info[ring->grp_idx];
7251 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7252 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7253 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7254 req->queue_id = cpu_to_le16(ring->queue_id);
7255 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7256 req->cmpl_coal_cnt =
7257 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7258 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7259 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7260 req->flags = cpu_to_le16(flags);
7261 break;
7262 }
7263 case HWRM_RING_ALLOC_RX:
7264 case HWRM_RING_ALLOC_AGG:
7265 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7266 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7267 cpu_to_le32(bp->rx_ring_mask + 1) :
7268 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7269 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7270 bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7271 rxr, ring);
7272 break;
7273 case HWRM_RING_ALLOC_CMPL:
7274 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7275 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7276 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7277 /* Association of cp ring with nq */
7278 grp_info = &bp->grp_info[map_index];
7279 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7280 req->cq_handle = cpu_to_le64(ring->handle);
7281 req->enables |= cpu_to_le32(
7282 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7283 } else {
7284 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7285 }
7286 break;
7287 case HWRM_RING_ALLOC_NQ:
7288 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7289 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7290 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7291 break;
7292 default:
7293 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7294 ring_type);
7295 return -EINVAL;
7296 }
7297
7298 resp = hwrm_req_hold(bp, req);
7299 rc = hwrm_req_send(bp, req);
7300 err = le16_to_cpu(resp->error_code);
7301 ring_id = le16_to_cpu(resp->ring_id);
7302 hwrm_req_drop(bp, req);
7303
7304 exit:
7305 if (rc || err) {
7306 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7307 ring_type, rc, err);
7308 return -EIO;
7309 }
7310 ring->fw_ring_id = ring_id;
7311 return rc;
7312 }
7313
7314 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7315 {
7316 int rc;
7317
7318 if (BNXT_PF(bp)) {
7319 struct hwrm_func_cfg_input *req;
7320
7321 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7322 if (rc)
7323 return rc;
7324
7325 req->fid = cpu_to_le16(0xffff);
7326 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7327 req->async_event_cr = cpu_to_le16(idx);
7328 return hwrm_req_send(bp, req);
7329 } else {
7330 struct hwrm_func_vf_cfg_input *req;
7331
7332 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7333 if (rc)
7334 return rc;
7335
7336 req->enables =
7337 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7338 req->async_event_cr = cpu_to_le16(idx);
7339 return hwrm_req_send(bp, req);
7340 }
7341 }
7342
7343 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7344 u32 ring_type)
7345 {
7346 switch (ring_type) {
7347 case HWRM_RING_ALLOC_TX:
7348 db->db_ring_mask = bp->tx_ring_mask;
7349 break;
7350 case HWRM_RING_ALLOC_RX:
7351 db->db_ring_mask = bp->rx_ring_mask;
7352 break;
7353 case HWRM_RING_ALLOC_AGG:
7354 db->db_ring_mask = bp->rx_agg_ring_mask;
7355 break;
7356 case HWRM_RING_ALLOC_CMPL:
7357 case HWRM_RING_ALLOC_NQ:
7358 db->db_ring_mask = bp->cp_ring_mask;
7359 break;
7360 }
7361 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7362 db->db_epoch_mask = db->db_ring_mask + 1;
7363 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7364 }
7365 }
7366
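/* Initialize the doorbell descriptor for a newly allocated ring:
 * 64-bit keyed doorbells at bp->db_offset on P5_PLUS chips, legacy
 * 32-bit doorbells at map_idx * 0x80 on older chips.
 */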
7367 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7368 u32 map_idx, u32 xid)
7369 {
7370 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7371 switch (ring_type) {
7372 case HWRM_RING_ALLOC_TX:
7373 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7374 break;
7375 case HWRM_RING_ALLOC_RX:
7376 case HWRM_RING_ALLOC_AGG:
7377 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7378 break;
7379 case HWRM_RING_ALLOC_CMPL:
7380 db->db_key64 = DBR_PATH_L2;
7381 break;
7382 case HWRM_RING_ALLOC_NQ:
7383 db->db_key64 = DBR_PATH_L2;
7384 break;
7385 }
7386 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7387
7388 if (bp->flags & BNXT_FLAG_CHIP_P7)
7389 db->db_key64 |= DBR_VALID;
7390
7391 db->doorbell = bp->bar1 + bp->db_offset;
7392 } else {
7393 db->doorbell = bp->bar1 + map_idx * 0x80;
7394 switch (ring_type) {
7395 case HWRM_RING_ALLOC_TX:
7396 db->db_key32 = DB_KEY_TX;
7397 break;
7398 case HWRM_RING_ALLOC_RX:
7399 case HWRM_RING_ALLOC_AGG:
7400 db->db_key32 = DB_KEY_RX;
7401 break;
7402 case HWRM_RING_ALLOC_CMPL:
7403 db->db_key32 = DB_KEY_CP;
7404 break;
7405 }
7406 }
7407 bnxt_set_db_mask(bp, db, ring_type);
7408 }
7409
7410 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7411 struct bnxt_rx_ring_info *rxr)
7412 {
7413 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7414 struct bnxt_napi *bnapi = rxr->bnapi;
7415 u32 type = HWRM_RING_ALLOC_RX;
7416 u32 map_idx = bnapi->index;
7417 int rc;
7418
7419 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7420 if (rc)
7421 return rc;
7422
7423 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7424 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7425
7426 return 0;
7427 }
7428
7429 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7430 struct bnxt_rx_ring_info *rxr)
7431 {
7432 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7433 u32 type = HWRM_RING_ALLOC_AGG;
7434 u32 grp_idx = ring->grp_idx;
7435 u32 map_idx;
7436 int rc;
7437
7438 map_idx = grp_idx + bp->rx_nr_rings;
7439 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7440 if (rc)
7441 return rc;
7442
7443 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7444 ring->fw_ring_id);
7445 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7446 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7447 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7448
7449 return 0;
7450 }
7451
7452 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7453 struct bnxt_cp_ring_info *cpr)
7454 {
7455 const u32 type = HWRM_RING_ALLOC_CMPL;
7456 struct bnxt_napi *bnapi = cpr->bnapi;
7457 struct bnxt_ring_struct *ring;
7458 u32 map_idx = bnapi->index;
7459 int rc;
7460
7461 ring = &cpr->cp_ring_struct;
7462 ring->handle = BNXT_SET_NQ_HDL(cpr);
7463 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7464 if (rc)
7465 return rc;
7466 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7467 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7468 return 0;
7469 }
7470
7471 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7472 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7473 {
7474 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7475 const u32 type = HWRM_RING_ALLOC_TX;
7476 int rc;
7477
7478 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7479 if (rc)
7480 return rc;
7481 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7482 return 0;
7483 }
7484
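/* Allocate all firmware rings in dependency order: NQ/completion
 * rings first (registering the async event completion ring on
 * ring 0), then TX rings, RX rings and finally aggregation rings.
 */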
7485 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7486 {
7487 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7488 int i, rc = 0;
7489 u32 type;
7490
7491 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7492 type = HWRM_RING_ALLOC_NQ;
7493 else
7494 type = HWRM_RING_ALLOC_CMPL;
7495 for (i = 0; i < bp->cp_nr_rings; i++) {
7496 struct bnxt_napi *bnapi = bp->bnapi[i];
7497 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7498 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7499 u32 map_idx = ring->map_idx;
7500 unsigned int vector;
7501
7502 vector = bp->irq_tbl[map_idx].vector;
7503 disable_irq_nosync(vector);
7504 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7505 if (rc) {
7506 enable_irq(vector);
7507 goto err_out;
7508 }
7509 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7510 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7511 enable_irq(vector);
7512 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7513
7514 if (!i) {
7515 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7516 if (rc)
7517 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7518 }
7519 }
7520
7521 for (i = 0; i < bp->tx_nr_rings; i++) {
7522 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7523
7524 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7525 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7526 if (rc)
7527 goto err_out;
7528 }
7529 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7530 if (rc)
7531 goto err_out;
7532 }
7533
7534 for (i = 0; i < bp->rx_nr_rings; i++) {
7535 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7536
7537 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7538 if (rc)
7539 goto err_out;
7540 /* If we have agg rings, post agg buffers first. */
7541 if (!agg_rings)
7542 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7543 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7544 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7545 if (rc)
7546 goto err_out;
7547 }
7548 }
7549
7550 if (agg_rings) {
7551 for (i = 0; i < bp->rx_nr_rings; i++) {
7552 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7553 if (rc)
7554 goto err_out;
7555 }
7556 }
7557 err_out:
7558 return rc;
7559 }
7560
7561 static void bnxt_cancel_dim(struct bnxt *bp)
7562 {
7563 int i;
7564
7565 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7566 * if NAPI is enabled.
7567 */
7568 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7569 return;
7570
7571 /* Make sure NAPI sees that the VNIC is disabled */
7572 synchronize_net();
7573 for (i = 0; i < bp->rx_nr_rings; i++) {
7574 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7575 struct bnxt_napi *bnapi = rxr->bnapi;
7576
7577 cancel_work_sync(&bnapi->cp_ring.dim.work);
7578 }
7579 }
7580
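/* Send HWRM_RING_FREE for one ring.  Returns 0 without sending
 * anything if the driver currently has no firmware access.
 */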
7581 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7582 struct bnxt_ring_struct *ring,
7583 u32 ring_type, int cmpl_ring_id)
7584 {
7585 struct hwrm_ring_free_output *resp;
7586 struct hwrm_ring_free_input *req;
7587 u16 error_code = 0;
7588 int rc;
7589
7590 if (BNXT_NO_FW_ACCESS(bp))
7591 return 0;
7592
7593 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7594 if (rc)
7595 goto exit;
7596
7597 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7598 req->ring_type = ring_type;
7599 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7600
7601 resp = hwrm_req_hold(bp, req);
7602 rc = hwrm_req_send(bp, req);
7603 error_code = le16_to_cpu(resp->error_code);
7604 hwrm_req_drop(bp, req);
7605 exit:
7606 if (rc || error_code) {
7607 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7608 ring_type, rc, error_code);
7609 return -EIO;
7610 }
7611 return 0;
7612 }
7613
7614 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7615 struct bnxt_tx_ring_info *txr,
7616 bool close_path)
7617 {
7618 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7619 u32 cmpl_ring_id;
7620
7621 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7622 return;
7623
7624 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7625 INVALID_HW_RING_ID;
7626 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7627 cmpl_ring_id);
7628 ring->fw_ring_id = INVALID_HW_RING_ID;
7629 }
7630
7631 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7632 struct bnxt_rx_ring_info *rxr,
7633 bool close_path)
7634 {
7635 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7636 u32 grp_idx = rxr->bnapi->index;
7637 u32 cmpl_ring_id;
7638
7639 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7640 return;
7641
7642 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7643 hwrm_ring_free_send_msg(bp, ring,
7644 RING_FREE_REQ_RING_TYPE_RX,
7645 close_path ? cmpl_ring_id :
7646 INVALID_HW_RING_ID);
7647 ring->fw_ring_id = INVALID_HW_RING_ID;
7648 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7649 }
7650
7651 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7652 struct bnxt_rx_ring_info *rxr,
7653 bool close_path)
7654 {
7655 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7656 u32 grp_idx = rxr->bnapi->index;
7657 u32 type, cmpl_ring_id;
7658
7659 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7660 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7661 else
7662 type = RING_FREE_REQ_RING_TYPE_RX;
7663
7664 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7665 return;
7666
7667 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7668 hwrm_ring_free_send_msg(bp, ring, type,
7669 close_path ? cmpl_ring_id :
7670 INVALID_HW_RING_ID);
7671 ring->fw_ring_id = INVALID_HW_RING_ID;
7672 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7673 }
7674
7675 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7676 struct bnxt_cp_ring_info *cpr)
7677 {
7678 struct bnxt_ring_struct *ring;
7679
7680 ring = &cpr->cp_ring_struct;
7681 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7682 return;
7683
7684 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7685 INVALID_HW_RING_ID);
7686 ring->fw_ring_id = INVALID_HW_RING_ID;
7687 }
7688
7689 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7690 {
7691 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7692 int i, size = ring->ring_mem.page_size;
7693
7694 cpr->cp_raw_cons = 0;
7695 cpr->toggle = 0;
7696
7697 for (i = 0; i < bp->cp_nr_pages; i++)
7698 if (cpr->cp_desc_ring[i])
7699 memset(cpr->cp_desc_ring[i], 0, size);
7700 }
7701
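/* Free all firmware rings in the reverse order of allocation: TX
 * first, then RX and aggregation rings, and finally the completion
 * and NQ rings after interrupts have been disabled.
 */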
7702 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7703 {
7704 u32 type;
7705 int i;
7706
7707 if (!bp->bnapi)
7708 return;
7709
7710 for (i = 0; i < bp->tx_nr_rings; i++)
7711 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7712
7713 bnxt_cancel_dim(bp);
7714 for (i = 0; i < bp->rx_nr_rings; i++) {
7715 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7716 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7717 }
7718
7719 /* The completion rings are about to be freed. After that the
7720 * IRQ doorbell will not work anymore. So we need to disable
7721 * IRQ here.
7722 */
7723 bnxt_disable_int_sync(bp);
7724
7725 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7726 type = RING_FREE_REQ_RING_TYPE_NQ;
7727 else
7728 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7729 for (i = 0; i < bp->cp_nr_rings; i++) {
7730 struct bnxt_napi *bnapi = bp->bnapi[i];
7731 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7732 struct bnxt_ring_struct *ring;
7733 int j;
7734
7735 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7736 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7737
7738 ring = &cpr->cp_ring_struct;
7739 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7740 hwrm_ring_free_send_msg(bp, ring, type,
7741 INVALID_HW_RING_ID);
7742 ring->fw_ring_id = INVALID_HW_RING_ID;
7743 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7744 }
7745 }
7746 }
7747
7748 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7749 bool shared);
7750 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7751 bool shared);
7752
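/* Query the resources currently reserved for this function
 * (HWRM_FUNC_QCFG) and update bp->hw_resc.  On P5_PLUS chips the
 * reserved RX/TX rings are trimmed to fit the reserved completion
 * rings.
 */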
7753 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7754 {
7755 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7756 struct hwrm_func_qcfg_output *resp;
7757 struct hwrm_func_qcfg_input *req;
7758 int rc;
7759
7760 if (bp->hwrm_spec_code < 0x10601)
7761 return 0;
7762
7763 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7764 if (rc)
7765 return rc;
7766
7767 req->fid = cpu_to_le16(0xffff);
7768 resp = hwrm_req_hold(bp, req);
7769 rc = hwrm_req_send(bp, req);
7770 if (rc) {
7771 hwrm_req_drop(bp, req);
7772 return rc;
7773 }
7774
7775 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7776 if (BNXT_NEW_RM(bp)) {
7777 u16 cp, stats;
7778
7779 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7780 hw_resc->resv_hw_ring_grps =
7781 le32_to_cpu(resp->alloc_hw_ring_grps);
7782 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7783 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7784 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7785 stats = le16_to_cpu(resp->alloc_stat_ctx);
7786 hw_resc->resv_irqs = cp;
7787 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7788 int rx = hw_resc->resv_rx_rings;
7789 int tx = hw_resc->resv_tx_rings;
7790
7791 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7792 rx >>= 1;
7793 if (cp < (rx + tx)) {
7794 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7795 if (rc)
7796 goto get_rings_exit;
7797 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7798 rx <<= 1;
7799 hw_resc->resv_rx_rings = rx;
7800 hw_resc->resv_tx_rings = tx;
7801 }
7802 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7803 hw_resc->resv_hw_ring_grps = rx;
7804 }
7805 hw_resc->resv_cp_rings = cp;
7806 hw_resc->resv_stat_ctxs = stats;
7807 }
7808 get_rings_exit:
7809 hwrm_req_drop(bp, req);
7810 return rc;
7811 }
7812
7813 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7814 {
7815 struct hwrm_func_qcfg_output *resp;
7816 struct hwrm_func_qcfg_input *req;
7817 int rc;
7818
7819 if (bp->hwrm_spec_code < 0x10601)
7820 return 0;
7821
7822 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7823 if (rc)
7824 return rc;
7825
7826 req->fid = cpu_to_le16(fid);
7827 resp = hwrm_req_hold(bp, req);
7828 rc = hwrm_req_send(bp, req);
7829 if (!rc)
7830 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7831
7832 hwrm_req_drop(bp, req);
7833 return rc;
7834 }
7835
7836 static bool bnxt_rfs_supported(struct bnxt *bp);
7837
7838 static struct hwrm_func_cfg_input *
7839 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7840 {
7841 struct hwrm_func_cfg_input *req;
7842 u32 enables = 0;
7843
7844 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7845 return NULL;
7846
7847 req->fid = cpu_to_le16(0xffff);
7848 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7849 req->num_tx_rings = cpu_to_le16(hwr->tx);
7850 if (BNXT_NEW_RM(bp)) {
7851 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7852 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7853 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7854 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7855 enables |= hwr->cp_p5 ?
7856 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7857 } else {
7858 enables |= hwr->cp ?
7859 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7860 enables |= hwr->grp ?
7861 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7862 }
7863 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7864 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7865 0;
7866 req->num_rx_rings = cpu_to_le16(hwr->rx);
7867 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7868 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7869 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7870 req->num_msix = cpu_to_le16(hwr->cp);
7871 } else {
7872 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7873 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7874 }
7875 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7876 req->num_vnics = cpu_to_le16(hwr->vnic);
7877 }
7878 req->enables = cpu_to_le32(enables);
7879 return req;
7880 }
7881
7882 static struct hwrm_func_vf_cfg_input *
7883 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7884 {
7885 struct hwrm_func_vf_cfg_input *req;
7886 u32 enables = 0;
7887
7888 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7889 return NULL;
7890
7891 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7892 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7893 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7894 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7895 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7896 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7897 enables |= hwr->cp_p5 ?
7898 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7899 } else {
7900 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7901 enables |= hwr->grp ?
7902 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7903 }
7904 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7905 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7906
7907 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7908 req->num_tx_rings = cpu_to_le16(hwr->tx);
7909 req->num_rx_rings = cpu_to_le16(hwr->rx);
7910 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7911 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7912 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7913 } else {
7914 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7915 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7916 }
7917 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7918 req->num_vnics = cpu_to_le16(hwr->vnic);
7919
7920 req->enables = cpu_to_le32(enables);
7921 return req;
7922 }
7923
7924 static int
7925 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7926 {
7927 struct hwrm_func_cfg_input *req;
7928 int rc;
7929
7930 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7931 if (!req)
7932 return -ENOMEM;
7933
7934 if (!req->enables) {
7935 hwrm_req_drop(bp, req);
7936 return 0;
7937 }
7938
7939 rc = hwrm_req_send(bp, req);
7940 if (rc)
7941 return rc;
7942
7943 if (bp->hwrm_spec_code < 0x10601)
7944 bp->hw_resc.resv_tx_rings = hwr->tx;
7945
7946 return bnxt_hwrm_get_rings(bp);
7947 }
7948
7949 static int
7950 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7951 {
7952 struct hwrm_func_vf_cfg_input *req;
7953 int rc;
7954
7955 if (!BNXT_NEW_RM(bp)) {
7956 bp->hw_resc.resv_tx_rings = hwr->tx;
7957 return 0;
7958 }
7959
7960 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7961 if (!req)
7962 return -ENOMEM;
7963
7964 rc = hwrm_req_send(bp, req);
7965 if (rc)
7966 return rc;
7967
7968 return bnxt_hwrm_get_rings(bp);
7969 }
7970
7971 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7972 {
7973 if (BNXT_PF(bp))
7974 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7975 else
7976 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7977 }
7978
7979 int bnxt_nq_rings_in_use(struct bnxt *bp)
7980 {
7981 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7982 }
7983
7984 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7985 {
7986 int cp;
7987
7988 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7989 return bnxt_nq_rings_in_use(bp);
7990
7991 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7992 return cp;
7993 }
7994
7995 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7996 {
7997 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7998 }
7999
8000 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8001 {
8002 if (!hwr->grp)
8003 return 0;
8004 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8005 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
8006
8007 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
8008 rss_ctx *= hwr->vnic;
8009 return rss_ctx;
8010 }
8011 if (BNXT_VF(bp))
8012 return BNXT_VF_MAX_RSS_CTX;
8013 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
8014 return hwr->grp + 1;
8015 return 1;
8016 }
8017
8018 /* Check if a default RSS map needs to be setup. This function is only
8019 * used on older firmware that does not require reserving RX rings.
8020 */
8021 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
8022 {
8023 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8024
8025 /* The RSS map is valid for RX rings set to resv_rx_rings */
8026 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
8027 hw_resc->resv_rx_rings = bp->rx_nr_rings;
8028 if (!netif_is_rxfh_configured(bp->dev))
8029 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8030 }
8031 }
8032
8033 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
8034 {
8035 if (bp->flags & BNXT_FLAG_RFS) {
8036 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
8037 return 2 + bp->num_rss_ctx;
8038 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8039 return rx_rings + 1;
8040 }
8041 return 1;
8042 }
8043
8044 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8045 {
8046 hwr->cp = bnxt_nq_rings_in_use(bp);
8047 hwr->cp_p5 = 0;
8048 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8049 hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
8050 hwr->tx = bp->tx_nr_rings;
8051 hwr->rx = bp->rx_nr_rings;
8052 hwr->grp = hwr->rx;
8053 hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
8054 hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
8055 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8056 hwr->rx <<= 1;
8057 hwr->stat = bnxt_get_func_stat_ctxs(bp);
8058 }
8059
8060 static bool bnxt_need_reserve_rings(struct bnxt *bp)
8061 {
8062 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8063 struct bnxt_hw_rings hwr;
8064
8065 bnxt_get_total_resources(bp, &hwr);
8066
8067 /* Old firmware does not need RX ring reservations but we still
8068 * need to setup a default RSS map when needed. With new firmware
8069 * we go through RX ring reservations first and then set up the
8070 * RSS map for the successfully reserved RX rings when needed.
8071 */
8072 if (!BNXT_NEW_RM(bp))
8073 bnxt_check_rss_tbl_no_rmgr(bp);
8074
8075 if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
8076 return true;
8077
8078 if (!BNXT_NEW_RM(bp))
8079 return false;
8080
8081 if (hw_resc->resv_rx_rings != hwr.rx ||
8082 hw_resc->resv_vnics != hwr.vnic ||
8083 hw_resc->resv_stat_ctxs != hwr.stat ||
8084 hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
8085 (hw_resc->resv_hw_ring_grps != hwr.grp &&
8086 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
8087 return true;
8088 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8089 if (hw_resc->resv_cp_rings != hwr.cp_p5)
8090 return true;
8091 } else if (hw_resc->resv_cp_rings != hwr.cp) {
8092 return true;
8093 }
8094 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
8095 hw_resc->resv_irqs != hwr.cp)
8096 return true;
8097 return false;
8098 }
8099
8100 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8101 {
8102 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8103
8104 hwr->tx = hw_resc->resv_tx_rings;
8105 if (BNXT_NEW_RM(bp)) {
8106 hwr->rx = hw_resc->resv_rx_rings;
8107 hwr->cp = hw_resc->resv_irqs;
8108 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8109 hwr->cp_p5 = hw_resc->resv_cp_rings;
8110 hwr->grp = hw_resc->resv_hw_ring_grps;
8111 hwr->vnic = hw_resc->resv_vnics;
8112 hwr->stat = hw_resc->resv_stat_ctxs;
8113 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8114 }
8115 }
8116
8117 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8118 {
8119 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8120 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8121 }
8122
8123 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8124
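/* Reserve rings and related resources (VNICs, RSS contexts, stat
 * contexts, MSI-X) with the firmware based on the requested ring
 * counts, then adjust bp->rx_nr_rings, bp->tx_nr_rings and
 * bp->cp_nr_rings to what was actually granted.
 */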
8125 static int __bnxt_reserve_rings(struct bnxt *bp)
8126 {
8127 struct bnxt_en_dev *edev = bp->edev[BNXT_AUXDEV_RDMA];
8128 struct bnxt_hw_rings hwr = {0};
8129 int rx_rings, old_rx_rings, rc;
8130 int cp = bp->cp_nr_rings;
8131 int ulp_msix = 0;
8132 bool sh = false;
8133 int tx_cp;
8134
8135 if (!bnxt_need_reserve_rings(bp))
8136 return 0;
8137
8138 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(edev)) {
8139 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8140 if (!ulp_msix)
8141 bnxt_set_ulp_stat_ctxs(bp, 0);
8142 else
8143 bnxt_set_dflt_ulp_stat_ctxs(bp);
8144
8145 if (ulp_msix > bp->ulp_num_msix_want)
8146 ulp_msix = bp->ulp_num_msix_want;
8147 hwr.cp = cp + ulp_msix;
8148 } else {
8149 hwr.cp = bnxt_nq_rings_in_use(bp);
8150 }
8151
8152 hwr.tx = bp->tx_nr_rings;
8153 hwr.rx = bp->rx_nr_rings;
8154 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8155 sh = true;
8156 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8157 hwr.cp_p5 = hwr.rx + hwr.tx;
8158
8159 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8160
8161 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8162 hwr.rx <<= 1;
8163 hwr.grp = bp->rx_nr_rings;
8164 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8165 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8166 old_rx_rings = bp->hw_resc.resv_rx_rings;
8167
8168 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8169 if (rc)
8170 return rc;
8171
8172 bnxt_copy_reserved_rings(bp, &hwr);
8173
8174 rx_rings = hwr.rx;
8175 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8176 if (hwr.rx >= 2) {
8177 rx_rings = hwr.rx >> 1;
8178 } else {
8179 if (netif_running(bp->dev))
8180 return -ENOMEM;
8181
8182 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8183 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8184 bp->dev->hw_features &= ~NETIF_F_LRO;
8185 bp->dev->features &= ~NETIF_F_LRO;
8186 bnxt_set_ring_params(bp);
8187 }
8188 }
8189 rx_rings = min_t(int, rx_rings, hwr.grp);
8190 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8191 if (bnxt_ulp_registered(edev) && hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8192 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8193 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8194 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8195 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8196 hwr.rx = rx_rings << 1;
8197 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8198 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8199 if (hwr.tx != bp->tx_nr_rings) {
8200 netdev_warn(bp->dev,
8201 "Able to reserve only %d out of %d requested TX rings\n",
8202 hwr.tx, bp->tx_nr_rings);
8203 }
8204 bp->tx_nr_rings = hwr.tx;
8205
8206 /* If we cannot reserve all the RX rings, reset the RSS map only
8207 * if absolutely necessary
8208 */
8209 if (rx_rings != bp->rx_nr_rings) {
8210 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8211 rx_rings, bp->rx_nr_rings);
8212 if (netif_is_rxfh_configured(bp->dev) &&
8213 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8214 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8215 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8216 ethtool_rxfh_indir_lost(bp->dev);
8217 }
8218 }
8219 bp->rx_nr_rings = rx_rings;
8220 bp->cp_nr_rings = hwr.cp;
8221
8222 /* Fall back if we cannot reserve enough HW RSS contexts */
8223 if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8224 hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8225 bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8226
8227 if (!bnxt_rings_ok(bp, &hwr))
8228 return -ENOMEM;
8229
8230 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8231 !netif_is_rxfh_configured(bp->dev))
8232 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8233
8234 if (!bnxt_ulp_registered(edev) && BNXT_NEW_RM(bp)) {
8235 int resv_msix, resv_ctx, ulp_ctxs;
8236 struct bnxt_hw_resc *hw_resc;
8237
8238 hw_resc = &bp->hw_resc;
8239 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8240 ulp_msix = min_t(int, resv_msix, ulp_msix);
8241 bnxt_set_ulp_msix_num(bp, ulp_msix);
8242 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8243 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8244 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8245 }
8246
8247 return rc;
8248 }
8249
8250 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8251 {
8252 struct hwrm_func_vf_cfg_input *req;
8253 u32 flags;
8254
8255 if (!BNXT_NEW_RM(bp))
8256 return 0;
8257
8258 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8259 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8260 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8261 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8262 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8263 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8264 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8265 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8266 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8267
8268 req->flags = cpu_to_le32(flags);
8269 return hwrm_req_send_silent(bp, req);
8270 }
8271
8272 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8273 {
8274 struct hwrm_func_cfg_input *req;
8275 u32 flags;
8276
8277 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8278 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8279 if (BNXT_NEW_RM(bp)) {
8280 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8281 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8282 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8283 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8284 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8285 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8286 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8287 else
8288 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8289 }
8290
8291 req->flags = cpu_to_le32(flags);
8292 return hwrm_req_send_silent(bp, req);
8293 }
8294
8295 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8296 {
8297 if (bp->hwrm_spec_code < 0x10801)
8298 return 0;
8299
8300 if (BNXT_PF(bp))
8301 return bnxt_hwrm_check_pf_rings(bp, hwr);
8302
8303 return bnxt_hwrm_check_vf_rings(bp, hwr);
8304 }
8305
8306 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8307 {
8308 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8309 struct hwrm_ring_aggint_qcaps_output *resp;
8310 struct hwrm_ring_aggint_qcaps_input *req;
8311 int rc;
8312
8313 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8314 coal_cap->num_cmpl_dma_aggr_max = 63;
8315 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8316 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8317 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8318 coal_cap->int_lat_tmr_min_max = 65535;
8319 coal_cap->int_lat_tmr_max_max = 65535;
8320 coal_cap->num_cmpl_aggr_int_max = 65535;
8321 coal_cap->timer_units = 80;
8322
8323 if (bp->hwrm_spec_code < 0x10902)
8324 return;
8325
8326 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8327 return;
8328
8329 resp = hwrm_req_hold(bp, req);
8330 rc = hwrm_req_send_silent(bp, req);
8331 if (!rc) {
8332 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8333 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8334 coal_cap->num_cmpl_dma_aggr_max =
8335 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8336 coal_cap->num_cmpl_dma_aggr_during_int_max =
8337 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8338 coal_cap->cmpl_aggr_dma_tmr_max =
8339 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8340 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8341 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8342 coal_cap->int_lat_tmr_min_max =
8343 le16_to_cpu(resp->int_lat_tmr_min_max);
8344 coal_cap->int_lat_tmr_max_max =
8345 le16_to_cpu(resp->int_lat_tmr_max_max);
8346 coal_cap->num_cmpl_aggr_int_max =
8347 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8348 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8349 }
8350 hwrm_req_drop(bp, req);
8351 }
8352
8353 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8354 {
8355 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8356
8357 return usec * 1000 / coal_cap->timer_units;
8358 }
8359
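/* Translate the driver's coalescing settings into a
 * HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping each value
 * to the limits reported by HWRM_RING_AGGINT_QCAPS.
 */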
8360 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8361 struct bnxt_coal *hw_coal,
8362 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8363 {
8364 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8365 u16 val, tmr, max, flags = hw_coal->flags;
8366 u32 cmpl_params = coal_cap->cmpl_params;
8367
8368 max = hw_coal->bufs_per_record * 128;
8369 if (hw_coal->budget)
8370 max = hw_coal->bufs_per_record * hw_coal->budget;
8371 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8372
8373 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8374 req->num_cmpl_aggr_int = cpu_to_le16(val);
8375
8376 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8377 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8378
8379 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8380 coal_cap->num_cmpl_dma_aggr_during_int_max);
8381 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8382
8383 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8384 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8385 req->int_lat_tmr_max = cpu_to_le16(tmr);
8386
8387 /* min timer set to 1/2 of interrupt timer */
8388 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8389 val = tmr / 2;
8390 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8391 req->int_lat_tmr_min = cpu_to_le16(val);
8392 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8393 }
8394
8395 /* buf timer set to 1/4 of interrupt timer */
8396 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8397 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8398
8399 if (cmpl_params &
8400 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8401 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8402 val = clamp_t(u16, tmr, 1,
8403 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8404 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8405 req->enables |=
8406 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8407 }
8408
8409 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8410 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8411 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8412 req->flags = cpu_to_le16(flags);
8413 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8414 }
8415
8416 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8417 struct bnxt_coal *hw_coal)
8418 {
8419 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8420 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8421 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8422 u32 nq_params = coal_cap->nq_params;
8423 u16 tmr;
8424 int rc;
8425
8426 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8427 return 0;
8428
8429 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8430 if (rc)
8431 return rc;
8432
8433 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8434 req->flags =
8435 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8436
8437 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8438 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8439 req->int_lat_tmr_min = cpu_to_le16(tmr);
8440 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8441 return hwrm_req_send(bp, req);
8442 }
8443
8444 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8445 {
8446 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8447 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8448 struct bnxt_coal coal;
8449 int rc;
8450
8451 /* Tick values in micro seconds.
8452 * 1 coal_buf x bufs_per_record = 1 completion record.
8453 */
8454 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8455
8456 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8457 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8458
8459 if (!bnapi->rx_ring)
8460 return -ENODEV;
8461
8462 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8463 if (rc)
8464 return rc;
8465
8466 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8467
8468 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8469
8470 return hwrm_req_send(bp, req_rx);
8471 }
8472
8473 static int
8474 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8475 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8476 {
8477 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8478
8479 req->ring_id = cpu_to_le16(ring_id);
8480 return hwrm_req_send(bp, req);
8481 }
8482
8483 static int
8484 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8485 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8486 {
8487 struct bnxt_tx_ring_info *txr;
8488 int i, rc;
8489
8490 bnxt_for_each_napi_tx(i, bnapi, txr) {
8491 u16 ring_id;
8492
8493 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8494 req->ring_id = cpu_to_le16(ring_id);
8495 rc = hwrm_req_send(bp, req);
8496 if (rc)
8497 return rc;
8498 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8499 return 0;
8500 }
8501 return 0;
8502 }
8503
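/* Apply the RX and TX interrupt coalescing settings to every
 * completion ring.  On P5_PLUS chips the NQ coalescing parameters
 * are updated as well.
 */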
8504 int bnxt_hwrm_set_coal(struct bnxt *bp)
8505 {
8506 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8507 int i, rc;
8508
8509 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8510 if (rc)
8511 return rc;
8512
8513 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8514 if (rc) {
8515 hwrm_req_drop(bp, req_rx);
8516 return rc;
8517 }
8518
8519 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8520 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8521
8522 hwrm_req_hold(bp, req_rx);
8523 hwrm_req_hold(bp, req_tx);
8524 for (i = 0; i < bp->cp_nr_rings; i++) {
8525 struct bnxt_napi *bnapi = bp->bnapi[i];
8526 struct bnxt_coal *hw_coal;
8527
8528 if (!bnapi->rx_ring)
8529 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8530 else
8531 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8532 if (rc)
8533 break;
8534
8535 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8536 continue;
8537
8538 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8539 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8540 if (rc)
8541 break;
8542 }
8543 if (bnapi->rx_ring)
8544 hw_coal = &bp->rx_coal;
8545 else
8546 hw_coal = &bp->tx_coal;
8547 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8548 }
8549 hwrm_req_drop(bp, req_rx);
8550 hwrm_req_drop(bp, req_tx);
8551 return rc;
8552 }
8553
8554 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8555 {
8556 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8557 struct hwrm_stat_ctx_free_input *req;
8558 int i;
8559
8560 if (!bp->bnapi)
8561 return;
8562
8563 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8564 return;
8565
8566 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8567 return;
8568 if (BNXT_FW_MAJ(bp) <= 20) {
8569 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8570 hwrm_req_drop(bp, req);
8571 return;
8572 }
8573 hwrm_req_hold(bp, req0);
8574 }
8575 hwrm_req_hold(bp, req);
8576 for (i = 0; i < bp->cp_nr_rings; i++) {
8577 struct bnxt_napi *bnapi = bp->bnapi[i];
8578 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8579
8580 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8581 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8582 if (req0) {
8583 req0->stat_ctx_id = req->stat_ctx_id;
8584 hwrm_req_send(bp, req0);
8585 }
8586 hwrm_req_send(bp, req);
8587
8588 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8589 }
8590 }
8591 hwrm_req_drop(bp, req);
8592 if (req0)
8593 hwrm_req_drop(bp, req0);
8594 }
8595
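/* Allocate one firmware statistics context per completion ring and
 * record the context ID in the corresponding ring group info.
 */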
8596 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8597 {
8598 struct hwrm_stat_ctx_alloc_output *resp;
8599 struct hwrm_stat_ctx_alloc_input *req;
8600 int rc, i;
8601
8602 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8603 return 0;
8604
8605 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8606 if (rc)
8607 return rc;
8608
8609 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8610 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8611
8612 resp = hwrm_req_hold(bp, req);
8613 for (i = 0; i < bp->cp_nr_rings; i++) {
8614 struct bnxt_napi *bnapi = bp->bnapi[i];
8615 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8616
8617 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8618
8619 rc = hwrm_req_send(bp, req);
8620 if (rc)
8621 break;
8622
8623 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8624
8625 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8626 }
8627 hwrm_req_drop(bp, req);
8628 return rc;
8629 }
8630
8631 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8632 {
8633 struct hwrm_func_qcfg_output *resp;
8634 struct hwrm_func_qcfg_input *req;
8635 u16 flags;
8636 int rc;
8637
8638 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8639 if (rc)
8640 return rc;
8641
8642 req->fid = cpu_to_le16(0xffff);
8643 resp = hwrm_req_hold(bp, req);
8644 rc = hwrm_req_send(bp, req);
8645 if (rc)
8646 goto func_qcfg_exit;
8647
8648 flags = le16_to_cpu(resp->flags);
8649 #ifdef CONFIG_BNXT_SRIOV
8650 if (BNXT_VF(bp)) {
8651 struct bnxt_vf_info *vf = &bp->vf;
8652
8653 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8654 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8655 vf->flags |= BNXT_VF_TRUST;
8656 else
8657 vf->flags &= ~BNXT_VF_TRUST;
8658 } else {
8659 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8660 }
8661 #endif
8662 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8663 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8664 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8665 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8666 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8667 }
8668 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8669 bp->flags |= BNXT_FLAG_MULTI_HOST;
8670
8671 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8672 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8673
8674 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8675 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8676 if (resp->roce_bidi_opt_mode &
8677 FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8678 bp->cos0_cos1_shared = 1;
8679 else
8680 bp->cos0_cos1_shared = 0;
8681
8682 switch (resp->port_partition_type) {
8683 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8684 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8685 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8686 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8687 bp->port_partition_type = resp->port_partition_type;
8688 break;
8689 }
8690 if (bp->hwrm_spec_code < 0x10707 ||
8691 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8692 bp->br_mode = BRIDGE_MODE_VEB;
8693 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8694 bp->br_mode = BRIDGE_MODE_VEPA;
8695 else
8696 bp->br_mode = BRIDGE_MODE_UNDEF;
8697
8698 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8699 if (!bp->max_mtu)
8700 bp->max_mtu = BNXT_MAX_MTU;
8701
8702 if (bp->db_size)
8703 goto func_qcfg_exit;
8704
8705 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8706 if (BNXT_CHIP_P5(bp)) {
8707 if (BNXT_PF(bp))
8708 bp->db_offset = DB_PF_OFFSET_P5;
8709 else
8710 bp->db_offset = DB_VF_OFFSET_P5;
8711 }
8712 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8713 1024);
8714 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8715 bp->db_size <= bp->db_offset)
8716 bp->db_size = pci_resource_len(bp->pdev, 2);
8717
8718 func_qcfg_exit:
8719 hwrm_req_drop(bp, req);
8720 return rc;
8721 }
8722
8723 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8724 u8 init_val, u8 init_offset,
8725 bool init_mask_set)
8726 {
8727 ctxm->init_value = init_val;
8728 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8729 if (init_mask_set)
8730 ctxm->init_offset = init_offset * 4;
8731 else
8732 ctxm->init_value = 0;
8733 }
8734
8735 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8736 {
8737 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8738 u16 type;
8739
8740 for (type = 0; type < ctx_max; type++) {
8741 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8742 int n = 1;
8743
8744 if (!ctxm->max_entries || ctxm->pg_info)
8745 continue;
8746
8747 if (ctxm->instance_bmap)
8748 n = hweight32(ctxm->instance_bmap);
8749 ctxm->pg_info = kzalloc_objs(*ctxm->pg_info, n);
8750 if (!ctxm->pg_info)
8751 return -ENOMEM;
8752 }
8753 return 0;
8754 }
8755
8756 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8757 struct bnxt_ctx_mem_type *ctxm, bool force);
8758
8759 #define BNXT_CTX_INIT_VALID(flags) \
8760 (!!((flags) & \
8761 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8762
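/* Walk all V2 backing store context types reported by the firmware
 * and cache their entry sizes, limits and initializer info in
 * bp->ctx.
 */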
8763 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8764 {
8765 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8766 struct hwrm_func_backing_store_qcaps_v2_input *req;
8767 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8768 u16 type, next_type = 0;
8769 int rc;
8770
8771 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8772 if (rc)
8773 return rc;
8774
8775 if (!ctx) {
8776 ctx = kzalloc_obj(*ctx);
8777 if (!ctx)
8778 return -ENOMEM;
8779 bp->ctx = ctx;
8780 }
8781
8782 resp = hwrm_req_hold(bp, req);
8783
8784 for (type = 0; type < BNXT_CTX_V2_MAX; type = next_type) {
8785 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8786 u8 init_val, init_off, i;
8787 u32 max_entries;
8788 u16 entry_size;
8789 __le32 *p;
8790 u32 flags;
8791
8792 req->type = cpu_to_le16(type);
8793 rc = hwrm_req_send(bp, req);
8794 if (rc)
8795 goto ctx_done;
8796 flags = le32_to_cpu(resp->flags);
8797 next_type = le16_to_cpu(resp->next_valid_type);
8798 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8799 bnxt_free_one_ctx_mem(bp, ctxm, true);
8800 continue;
8801 }
8802 entry_size = le16_to_cpu(resp->entry_size);
8803 max_entries = le32_to_cpu(resp->max_num_entries);
8804 if (ctxm->mem_valid) {
8805 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8806 ctxm->entry_size != entry_size ||
8807 ctxm->max_entries != max_entries)
8808 bnxt_free_one_ctx_mem(bp, ctxm, true);
8809 else
8810 continue;
8811 }
8812 ctxm->type = type;
8813 ctxm->entry_size = entry_size;
8814 ctxm->flags = flags;
8815 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8816 ctxm->entry_multiple = resp->entry_multiple;
8817 ctxm->max_entries = max_entries;
8818 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8819 init_val = resp->ctx_init_value;
8820 init_off = resp->ctx_init_offset;
8821 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8822 BNXT_CTX_INIT_VALID(flags));
8823 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8824 BNXT_MAX_SPLIT_ENTRY);
8825 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8826 i++, p++)
8827 ctxm->split[i] = le32_to_cpu(*p);
8828 }
8829 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8830
8831 ctx_done:
8832 hwrm_req_drop(bp, req);
8833 return rc;
8834 }
8835
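/* Legacy (pre-V2) backing store capability query: a single fixed-layout
 * response describes the QP, SRQ, CQ, VNIC, STAT, TQM, MRAV and TIM context
 * types, which are unpacked into bp->ctx->ctx_arr[] here.
 */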
8836 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8837 {
8838 struct hwrm_func_backing_store_qcaps_output *resp;
8839 struct hwrm_func_backing_store_qcaps_input *req;
8840 int rc;
8841
8842 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8843 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8844 return 0;
8845
8846 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8847 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8848
8849 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8850 if (rc)
8851 return rc;
8852
8853 resp = hwrm_req_hold(bp, req);
8854 rc = hwrm_req_send_silent(bp, req);
8855 if (!rc) {
8856 struct bnxt_ctx_mem_type *ctxm;
8857 struct bnxt_ctx_mem_info *ctx;
8858 u8 init_val, init_idx = 0;
8859 u16 init_mask;
8860
8861 ctx = bp->ctx;
8862 if (!ctx) {
8863 ctx = kzalloc_obj(*ctx);
8864 if (!ctx) {
8865 rc = -ENOMEM;
8866 goto ctx_err;
8867 }
8868 bp->ctx = ctx;
8869 }
8870 init_val = resp->ctx_kind_initializer;
8871 init_mask = le16_to_cpu(resp->ctx_init_mask);
8872
8873 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8874 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8875 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8876 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8877 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8878 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8879 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8880 (init_mask & (1 << init_idx++)) != 0);
8881
8882 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8883 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8884 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8885 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8886 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8887 (init_mask & (1 << init_idx++)) != 0);
8888
8889 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8890 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8891 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8892 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8893 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8894 (init_mask & (1 << init_idx++)) != 0);
8895
8896 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8897 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8898 ctxm->max_entries = ctxm->vnic_entries +
8899 le16_to_cpu(resp->vnic_max_ring_table_entries);
8900 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8901 bnxt_init_ctx_initializer(ctxm, init_val,
8902 resp->vnic_init_offset,
8903 (init_mask & (1 << init_idx++)) != 0);
8904
8905 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8906 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8907 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8908 bnxt_init_ctx_initializer(ctxm, init_val,
8909 resp->stat_init_offset,
8910 (init_mask & (1 << init_idx++)) != 0);
8911
8912 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8913 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8914 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8915 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8916 ctxm->entry_multiple = resp->tqm_entries_multiple;
8917 if (!ctxm->entry_multiple)
8918 ctxm->entry_multiple = 1;
8919
8920 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8921
8922 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8923 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8924 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8925 ctxm->mrav_num_entries_units =
8926 le16_to_cpu(resp->mrav_num_entries_units);
8927 bnxt_init_ctx_initializer(ctxm, init_val,
8928 resp->mrav_init_offset,
8929 (init_mask & (1 << init_idx++)) != 0);
8930
8931 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8932 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8933 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8934
8935 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8936 if (!ctx->tqm_fp_rings_count)
8937 ctx->tqm_fp_rings_count = bp->max_q;
8938 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8939 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8940
8941 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8942 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8943 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8944
8945 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8946 } else {
8947 rc = 0;
8948 }
8949 ctx_err:
8950 hwrm_req_drop(bp, req);
8951 return rc;
8952 }
8953
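/* Encode the page size and indirection level of a ring memory block into the
 * pg_attr/pg_dir pair of a backing store request: depth 0 points pg_dir at
 * the first data page, depth 1 or 2 points it at the page table.
 */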
8954 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8955 __le64 *pg_dir)
8956 {
8957 if (!rmem->nr_pages)
8958 return;
8959
8960 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8961 if (rmem->depth >= 1) {
8962 if (rmem->depth == 2)
8963 *pg_attr |= 2;
8964 else
8965 *pg_attr |= 1;
8966 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8967 } else {
8968 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8969 }
8970 }
8971
8972 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8973 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8974 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8975 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8976 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8977 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8978
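/* Program the legacy backing store configuration in a single HWRM message.
 * Only the sections selected by "enables" are filled in; the trailing loop
 * covers the TQM slow-path ring and the fast-path rings, one enable bit per
 * ring.
 */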
8979 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8980 {
8981 struct hwrm_func_backing_store_cfg_input *req;
8982 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8983 struct bnxt_ctx_pg_info *ctx_pg;
8984 struct bnxt_ctx_mem_type *ctxm;
8985 void **__req = (void **)&req;
8986 u32 req_len = sizeof(*req);
8987 __le32 *num_entries;
8988 __le64 *pg_dir;
8989 u32 flags = 0;
8990 u8 *pg_attr;
8991 u32 ena;
8992 int rc;
8993 int i;
8994
8995 if (!ctx)
8996 return 0;
8997
8998 if (req_len > bp->hwrm_max_ext_req_len)
8999 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
9000 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
9001 if (rc)
9002 return rc;
9003
9004 req->enables = cpu_to_le32(enables);
9005 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
9006 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9007 ctx_pg = ctxm->pg_info;
9008 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
9009 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
9010 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
9011 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
9012 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9013 &req->qpc_pg_size_qpc_lvl,
9014 &req->qpc_page_dir);
9015
9016 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
9017 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
9018 }
9019 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
9020 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9021 ctx_pg = ctxm->pg_info;
9022 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
9023 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
9024 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
9025 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9026 &req->srq_pg_size_srq_lvl,
9027 &req->srq_page_dir);
9028 }
9029 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
9030 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9031 ctx_pg = ctxm->pg_info;
9032 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
9033 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
9034 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
9035 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9036 &req->cq_pg_size_cq_lvl,
9037 &req->cq_page_dir);
9038 }
9039 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
9040 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9041 ctx_pg = ctxm->pg_info;
9042 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
9043 req->vnic_num_ring_table_entries =
9044 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
9045 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
9046 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9047 &req->vnic_pg_size_vnic_lvl,
9048 &req->vnic_page_dir);
9049 }
9050 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
9051 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9052 ctx_pg = ctxm->pg_info;
9053 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
9054 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
9055 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9056 &req->stat_pg_size_stat_lvl,
9057 &req->stat_page_dir);
9058 }
9059 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
9060 u32 units;
9061
9062 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9063 ctx_pg = ctxm->pg_info;
9064 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
9065 units = ctxm->mrav_num_entries_units;
9066 if (units) {
9067 u32 num_mr, num_ah = ctxm->mrav_av_entries;
9068 u32 entries;
9069
9070 num_mr = ctx_pg->entries - num_ah;
9071 entries = ((num_mr / units) << 16) | (num_ah / units);
9072 req->mrav_num_entries = cpu_to_le32(entries);
9073 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
9074 }
9075 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
9076 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9077 &req->mrav_pg_size_mrav_lvl,
9078 &req->mrav_page_dir);
9079 }
9080 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
9081 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9082 ctx_pg = ctxm->pg_info;
9083 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
9084 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
9085 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9086 &req->tim_pg_size_tim_lvl,
9087 &req->tim_page_dir);
9088 }
9089 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9090 for (i = 0, num_entries = &req->tqm_sp_num_entries,
9091 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
9092 pg_dir = &req->tqm_sp_page_dir,
9093 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
9094 ctx_pg = ctxm->pg_info;
9095 i < BNXT_MAX_TQM_RINGS;
9096 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
9097 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
9098 if (!(enables & ena))
9099 continue;
9100
9101 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
9102 *num_entries = cpu_to_le32(ctx_pg->entries);
9103 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9104 }
9105 req->flags = cpu_to_le32(flags);
9106 return hwrm_req_send(bp, req);
9107 }
9108
9109 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9110 struct bnxt_ctx_pg_info *ctx_pg)
9111 {
9112 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9113
9114 rmem->page_size = BNXT_PAGE_SIZE;
9115 rmem->pg_arr = ctx_pg->ctx_pg_arr;
9116 rmem->dma_arr = ctx_pg->ctx_dma_arr;
9117 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9118 if (rmem->depth >= 1)
9119 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9120 return bnxt_alloc_ring(bp, rmem);
9121 }
9122
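/* Allocate the backing pages for one context memory instance.  Regions that
 * fit in MAX_CTX_PAGES (and were not asked for depth > 1) use a single
 * level; larger regions get a two-level layout with a directory of page
 * tables, each covering up to MAX_CTX_PAGES data pages and the last one
 * sized to the remainder.
 */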
9123 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9124 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9125 u8 depth, struct bnxt_ctx_mem_type *ctxm)
9126 {
9127 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9128 int rc;
9129
9130 if (!mem_size)
9131 return -EINVAL;
9132
9133 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9134 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9135 ctx_pg->nr_pages = 0;
9136 return -EINVAL;
9137 }
9138 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9139 int nr_tbls, i;
9140
9141 rmem->depth = 2;
9142 ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9143 if (!ctx_pg->ctx_pg_tbl)
9144 return -ENOMEM;
9145 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9146 rmem->nr_pages = nr_tbls;
9147 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9148 if (rc)
9149 return rc;
9150 for (i = 0; i < nr_tbls; i++) {
9151 struct bnxt_ctx_pg_info *pg_tbl;
9152
9153 pg_tbl = kzalloc_obj(*pg_tbl);
9154 if (!pg_tbl)
9155 return -ENOMEM;
9156 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9157 rmem = &pg_tbl->ring_mem;
9158 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9159 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9160 rmem->depth = 1;
9161 rmem->nr_pages = MAX_CTX_PAGES;
9162 rmem->ctx_mem = ctxm;
9163 if (i == (nr_tbls - 1)) {
9164 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9165
9166 if (rem)
9167 rmem->nr_pages = rem;
9168 }
9169 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9170 if (rc)
9171 break;
9172 }
9173 } else {
9174 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9175 if (rmem->nr_pages > 1 || depth)
9176 rmem->depth = 1;
9177 rmem->ctx_mem = ctxm;
9178 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9179 }
9180 return rc;
9181 }
9182
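/* Copy the [head, tail) window of one context memory instance into buf,
 * walking the second-level page tables when present and wrapping at the end
 * of the region.  Used by the context dump helpers below.
 */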
9183 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9184 struct bnxt_ctx_pg_info *ctx_pg,
9185 void *buf, size_t offset, size_t head,
9186 size_t tail)
9187 {
9188 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9189 size_t nr_pages = ctx_pg->nr_pages;
9190 int page_size = rmem->page_size;
9191 size_t len = 0, total_len = 0;
9192 u16 depth = rmem->depth;
9193
9194 tail %= nr_pages * page_size;
9195 do {
9196 if (depth > 1) {
9197 int i = head / (page_size * MAX_CTX_PAGES);
9198 struct bnxt_ctx_pg_info *pg_tbl;
9199
9200 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9201 rmem = &pg_tbl->ring_mem;
9202 }
9203 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9204 head += len;
9205 offset += len;
9206 total_len += len;
9207 if (head >= nr_pages * page_size)
9208 head = 0;
9209 } while (head != tail);
9210 return total_len;
9211 }
9212
9213 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9214 struct bnxt_ctx_pg_info *ctx_pg)
9215 {
9216 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9217
9218 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9219 ctx_pg->ctx_pg_tbl) {
9220 int i, nr_tbls = rmem->nr_pages;
9221
9222 for (i = 0; i < nr_tbls; i++) {
9223 struct bnxt_ctx_pg_info *pg_tbl;
9224 struct bnxt_ring_mem_info *rmem2;
9225
9226 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9227 if (!pg_tbl)
9228 continue;
9229 rmem2 = &pg_tbl->ring_mem;
9230 bnxt_free_ring(bp, rmem2);
9231 ctx_pg->ctx_pg_arr[i] = NULL;
9232 kfree(pg_tbl);
9233 ctx_pg->ctx_pg_tbl[i] = NULL;
9234 }
9235 kfree(ctx_pg->ctx_pg_tbl);
9236 ctx_pg->ctx_pg_tbl = NULL;
9237 }
9238 bnxt_free_ring(bp, rmem);
9239 ctx_pg->nr_pages = 0;
9240 }
9241
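/* Size and allocate backing store pages for one context type: round the
 * requested entry count up to the firmware's multiple, clamp it to the
 * reported min/max, then allocate one page-table set per instance.  The
 * initializer is passed down only when a non-zero init value was reported.
 */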
9242 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9243 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9244 u8 pg_lvl)
9245 {
9246 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9247 int i, rc = 0, n = 1;
9248 u32 mem_size;
9249
9250 if (!ctxm->entry_size || !ctx_pg)
9251 return -EINVAL;
9252 if (ctxm->instance_bmap)
9253 n = hweight32(ctxm->instance_bmap);
9254 if (ctxm->entry_multiple)
9255 entries = roundup(entries, ctxm->entry_multiple);
9256 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9257 mem_size = entries * ctxm->entry_size;
9258 for (i = 0; i < n && !rc; i++) {
9259 ctx_pg[i].entries = entries;
9260 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9261 ctxm->init_value ? ctxm : NULL);
9262 }
9263 if (!rc)
9264 ctxm->mem_valid = 1;
9265 return rc;
9266 }
9267
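/* Send HWRM_FUNC_BACKING_STORE_CFG_V2 for one context type, one message per
 * instance in the instance bitmap.  For persistent trace memory the last
 * recorded offset is handed back in next_bs_offset, and the final message of
 * the last type carries the BS_CFG_ALL_DONE flag.
 */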
9268 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9269 struct bnxt_ctx_mem_type *ctxm,
9270 bool last)
9271 {
9272 struct hwrm_func_backing_store_cfg_v2_input *req;
9273 u32 instance_bmap = ctxm->instance_bmap;
9274 int i, j, rc = 0, n = 1;
9275 __le32 *p;
9276
9277 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9278 return 0;
9279
9280 if (instance_bmap)
9281 n = hweight32(ctxm->instance_bmap);
9282 else
9283 instance_bmap = 1;
9284
9285 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9286 if (rc)
9287 return rc;
9288 hwrm_req_hold(bp, req);
9289 req->type = cpu_to_le16(ctxm->type);
9290 req->entry_size = cpu_to_le16(ctxm->entry_size);
9291 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9292 bnxt_bs_trace_avail(bp, ctxm->type)) {
9293 struct bnxt_bs_trace_info *bs_trace;
9294 u32 enables;
9295
9296 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9297 req->enables = cpu_to_le32(enables);
9298 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9299 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9300 }
9301 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9302 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9303 p[i] = cpu_to_le32(ctxm->split[i]);
9304 for (i = 0, j = 0; j < n && !rc; i++) {
9305 struct bnxt_ctx_pg_info *ctx_pg;
9306
9307 if (!(instance_bmap & (1 << i)))
9308 continue;
9309 req->instance = cpu_to_le16(i);
9310 ctx_pg = &ctxm->pg_info[j++];
9311 if (!ctx_pg->entries)
9312 continue;
9313 req->num_entries = cpu_to_le32(ctx_pg->entries);
9314 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9315 &req->page_size_pbl_level,
9316 &req->page_dir);
9317 if (last && j == n)
9318 req->flags =
9319 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9320 rc = hwrm_req_send(bp, req);
9321 }
9322 hwrm_req_drop(bp, req);
9323 return rc;
9324 }
9325
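/* Configure all V2 backing store types: allocate any missing trace buffers
 * (BNXT_CTX_SRT through BNXT_CTX_QPC) first, then push every type with valid
 * memory to firmware, marking the last one so firmware knows the sequence is
 * complete.
 */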
9326 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9327 {
9328 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9329 struct bnxt_ctx_mem_type *ctxm;
9330 u16 last_type = BNXT_CTX_INV;
9331 int rc = 0;
9332 u16 type;
9333
9334 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9335 ctxm = &ctx->ctx_arr[type];
9336 if (!bnxt_bs_trace_avail(bp, type))
9337 continue;
9338 if (!ctxm->mem_valid) {
9339 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9340 ctxm->max_entries, 1);
9341 if (rc) {
9342 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9343 type);
9344 continue;
9345 }
9346 bnxt_bs_trace_init(bp, ctxm);
9347 }
9348 last_type = type;
9349 }
9350
9351 if (last_type == BNXT_CTX_INV) {
9352 for (type = 0; type < BNXT_CTX_MAX; type++) {
9353 ctxm = &ctx->ctx_arr[type];
9354 if (ctxm->mem_valid)
9355 last_type = type;
9356 }
9357 if (last_type == BNXT_CTX_INV)
9358 return 0;
9359 }
9360 ctx->ctx_arr[last_type].last = 1;
9361
9362 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9363 ctxm = &ctx->ctx_arr[type];
9364
9365 if (!ctxm->mem_valid)
9366 continue;
9367 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9368 if (rc)
9369 return rc;
9370 }
9371 return 0;
9372 }
9373
9374 /**
9375 * __bnxt_copy_ctx_mem - copy host context memory
9376 * @bp: The driver context
9377 * @ctxm: The pointer to the context memory type
9378 * @buf: The destination buffer or NULL to just obtain the length
9379 * @offset: The buffer offset to copy the data to
9380 * @head: The head offset of context memory to copy from
9381 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9382 *
9383 * This function is called for debugging purposes to dump the host context
9384 * used by the chip.
9385 *
9386 * Return: Length of memory copied
9387 */
9388 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9389 struct bnxt_ctx_mem_type *ctxm, void *buf,
9390 size_t offset, size_t head, size_t tail)
9391 {
9392 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9393 size_t len = 0, total_len = 0;
9394 int i, n = 1;
9395
9396 if (!ctx_pg)
9397 return 0;
9398
9399 if (ctxm->instance_bmap)
9400 n = hweight32(ctxm->instance_bmap);
9401 for (i = 0; i < n; i++) {
9402 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9403 tail);
9404 offset += len;
9405 total_len += len;
9406 }
9407 return total_len;
9408 }
9409
9410 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9411 void *buf, size_t offset)
9412 {
9413 size_t tail = ctxm->max_entries * ctxm->entry_size;
9414
9415 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9416 }
9417
9418 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9419 struct bnxt_ctx_mem_type *ctxm, bool force)
9420 {
9421 struct bnxt_ctx_pg_info *ctx_pg;
9422 int i, n = 1;
9423
9424 ctxm->last = 0;
9425
9426 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9427 return;
9428
9429 ctx_pg = ctxm->pg_info;
9430 if (ctx_pg) {
9431 if (ctxm->instance_bmap)
9432 n = hweight32(ctxm->instance_bmap);
9433 for (i = 0; i < n; i++)
9434 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9435
9436 kfree(ctx_pg);
9437 ctxm->pg_info = NULL;
9438 ctxm->mem_valid = 0;
9439 }
9440 memset(ctxm, 0, sizeof(*ctxm));
9441 }
9442
9443 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9444 {
9445 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9446 u16 type;
9447
9448 if (!ctx)
9449 return;
9450
9451 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9452 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9453
9454 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9455 if (force) {
9456 kfree(ctx);
9457 bp->ctx = NULL;
9458 }
9459 }
9460
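/* Top-level context memory setup: query capabilities, size the QP, SRQ, CQ,
 * VNIC, STAT, MRAV, TIM and TQM regions (with extra RoCE headroom when the
 * RoCE capability is present and this is not a kdump kernel), then push the
 * result to firmware through the V2 or legacy configuration call.
 */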
9461 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9462 {
9463 struct bnxt_ctx_mem_type *ctxm;
9464 struct bnxt_ctx_mem_info *ctx;
9465 u32 l2_qps, qp1_qps, max_qps;
9466 u32 ena, entries_sp, entries;
9467 u32 srqs, max_srqs, min;
9468 u32 num_mr, num_ah;
9469 u32 extra_srqs = 0;
9470 u32 extra_qps = 0;
9471 u32 fast_qpmd_qps;
9472 u8 pg_lvl = 1;
9473 int i, rc;
9474
9475 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9476 if (rc) {
9477 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9478 rc);
9479 return rc;
9480 }
9481 ctx = bp->ctx;
9482 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9483 return 0;
9484
9485 ena = 0;
9486 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9487 goto skip_legacy;
9488
9489 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9490 l2_qps = ctxm->qp_l2_entries;
9491 qp1_qps = ctxm->qp_qp1_entries;
9492 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9493 max_qps = ctxm->max_entries;
9494 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9495 srqs = ctxm->srq_l2_entries;
9496 max_srqs = ctxm->max_entries;
9497 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9498 pg_lvl = 2;
9499 if (BNXT_SW_RES_LMT(bp)) {
9500 extra_qps = max_qps - l2_qps - qp1_qps;
9501 extra_srqs = max_srqs - srqs;
9502 } else {
9503 extra_qps = min_t(u32, 65536,
9504 max_qps - l2_qps - qp1_qps);
9505 /* allocate extra qps if fw supports RoCE fast qp
9506 * destroy feature
9507 */
9508 extra_qps += fast_qpmd_qps;
9509 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9510 }
9511 if (fast_qpmd_qps)
9512 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9513 }
9514
9515 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9516 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9517 pg_lvl);
9518 if (rc)
9519 return rc;
9520
9521 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9522 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9523 if (rc)
9524 return rc;
9525
9526 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9527 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9528 extra_qps * 2, pg_lvl);
9529 if (rc)
9530 return rc;
9531
9532 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9533 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9534 if (rc)
9535 return rc;
9536
9537 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9538 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9539 if (rc)
9540 return rc;
9541
9542 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9543 goto skip_rdma;
9544
9545 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9546 if (BNXT_SW_RES_LMT(bp) &&
9547 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9548 num_ah = ctxm->mrav_av_entries;
9549 num_mr = ctxm->max_entries - num_ah;
9550 } else {
9551 /* 128K extra is needed to accommodate static AH context
9552 * allocation by f/w.
9553 */
9554 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9555 num_ah = min_t(u32, num_mr, 1024 * 128);
9556 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9557 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9558 ctxm->mrav_av_entries = num_ah;
9559 }
9560
9561 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9562 if (rc)
9563 return rc;
9564 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9565
9566 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9567 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9568 if (rc)
9569 return rc;
9570 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9571
9572 skip_rdma:
9573 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9574 min = ctxm->min_entries;
9575 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9576 2 * (extra_qps + qp1_qps) + min;
9577 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9578 if (rc)
9579 return rc;
9580
9581 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9582 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9583 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9584 if (rc)
9585 return rc;
9586 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9587 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9588 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9589
9590 skip_legacy:
9591 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9592 rc = bnxt_backing_store_cfg_v2(bp);
9593 else
9594 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9595 if (rc) {
9596 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9597 rc);
9598 return rc;
9599 }
9600 ctx->flags |= BNXT_CTX_FLAG_INITED;
9601 return 0;
9602 }
9603
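/* Describe the host crash dump buffer to firmware: page size, indirection
 * level, page table base and total length, with host DDR as the output
 * destination.
 */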
9604 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9605 {
9606 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9607 u16 page_attr;
9608 int rc;
9609
9610 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9611 return 0;
9612
9613 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9614 if (rc)
9615 return rc;
9616
9617 if (BNXT_PAGE_SIZE == 0x2000)
9618 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9619 else if (BNXT_PAGE_SIZE == 0x10000)
9620 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9621 else
9622 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9623 req->pg_size_lvl = cpu_to_le16(page_attr |
9624 bp->fw_crash_mem->ring_mem.depth);
9625 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9626 req->size = cpu_to_le32(bp->fw_crash_len);
9627 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9628 return hwrm_req_send(bp, req);
9629 }
9630
9631 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9632 {
9633 if (bp->fw_crash_mem) {
9634 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9635 kfree(bp->fw_crash_mem);
9636 bp->fw_crash_mem = NULL;
9637 }
9638 }
9639
9640 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9641 {
9642 u32 mem_size = 0;
9643 int rc;
9644
9645 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9646 return 0;
9647
9648 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9649 if (rc)
9650 return rc;
9651
9652 mem_size = round_up(mem_size, 4);
9653
9654 /* keep and use the existing pages */
9655 if (bp->fw_crash_mem &&
9656 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9657 goto alloc_done;
9658
9659 if (bp->fw_crash_mem)
9660 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9661 else
9662 bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9663 if (!bp->fw_crash_mem)
9664 return -ENOMEM;
9665
9666 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9667 if (rc) {
9668 bnxt_free_crash_dump_mem(bp);
9669 return rc;
9670 }
9671
9672 alloc_done:
9673 bp->fw_crash_len = mem_size;
9674 return 0;
9675 }
9676
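/* Query the min/max resource ranges (rings, VNICs, stat contexts, ...) for
 * this function.  When "all" is false only the TX scheduler input limit is
 * captured.
 */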
9677 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9678 {
9679 struct hwrm_func_resource_qcaps_output *resp;
9680 struct hwrm_func_resource_qcaps_input *req;
9681 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9682 int rc;
9683
9684 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9685 if (rc)
9686 return rc;
9687
9688 req->fid = cpu_to_le16(0xffff);
9689 resp = hwrm_req_hold(bp, req);
9690 rc = hwrm_req_send_silent(bp, req);
9691 if (rc)
9692 goto hwrm_func_resc_qcaps_exit;
9693
9694 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9695 if (!all)
9696 goto hwrm_func_resc_qcaps_exit;
9697
9698 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9699 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9700 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9701 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9702 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9703 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9704 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9705 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9706 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9707 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9708 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9709 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9710 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9711 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9712 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9713 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9714
9715 if (hw_resc->max_rsscos_ctxs >=
9716 hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9717 bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9718
9719 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9720 u16 max_msix = le16_to_cpu(resp->max_msix);
9721
9722 hw_resc->max_nqs = max_msix;
9723 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9724 }
9725
9726 if (BNXT_PF(bp)) {
9727 struct bnxt_pf_info *pf = &bp->pf;
9728
9729 pf->vf_resv_strategy =
9730 le16_to_cpu(resp->vf_reservation_strategy);
9731 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9732 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9733 }
9734 hwrm_func_resc_qcaps_exit:
9735 hwrm_req_drop(bp, req);
9736 return rc;
9737 }
9738
9739 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9740 {
9741 struct hwrm_port_mac_ptp_qcfg_output *resp;
9742 struct hwrm_port_mac_ptp_qcfg_input *req;
9743 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9744 u8 flags;
9745 int rc;
9746
9747 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9748 rc = -ENODEV;
9749 goto no_ptp;
9750 }
9751
9752 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9753 if (rc)
9754 goto no_ptp;
9755
9756 req->port_id = cpu_to_le16(bp->pf.port_id);
9757 resp = hwrm_req_hold(bp, req);
9758 rc = hwrm_req_send(bp, req);
9759 if (rc)
9760 goto exit;
9761
9762 flags = resp->flags;
9763 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9764 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9765 rc = -ENODEV;
9766 goto exit;
9767 }
9768 if (!ptp) {
9769 ptp = kzalloc_obj(*ptp);
9770 if (!ptp) {
9771 rc = -ENOMEM;
9772 goto exit;
9773 }
9774 ptp->bp = bp;
9775 bp->ptp_cfg = ptp;
9776 }
9777
9778 if (flags &
9779 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9780 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9781 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9782 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9783 } else if (BNXT_CHIP_P5(bp)) {
9784 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9785 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9786 } else {
9787 rc = -ENODEV;
9788 goto exit;
9789 }
9790 ptp->rtc_configured =
9791 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9792 rc = bnxt_ptp_init(bp);
9793 if (rc)
9794 netdev_warn(bp->dev, "PTP initialization failed.\n");
9795 exit:
9796 hwrm_req_drop(bp, req);
9797 if (!rc)
9798 return 0;
9799
9800 no_ptp:
9801 bnxt_ptp_clear(bp);
9802 kfree(ptp);
9803 bp->ptp_cfg = NULL;
9804 return rc;
9805 }
9806
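/* Core HWRM_FUNC_QCAPS handling: translate the flags/flags_ext* capability
 * words into bp->flags and bp->fw_cap bits, record the per-function hardware
 * resource maximums, and capture the PF or VF identity (FID, MAC, VF range).
 */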
9807 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9808 {
9809 u32 flags, flags_ext, flags_ext2, flags_ext3;
9810 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9811 struct hwrm_func_qcaps_output *resp;
9812 struct hwrm_func_qcaps_input *req;
9813 int rc;
9814
9815 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9816 if (rc)
9817 return rc;
9818
9819 req->fid = cpu_to_le16(0xffff);
9820 resp = hwrm_req_hold(bp, req);
9821 rc = hwrm_req_send(bp, req);
9822 if (rc)
9823 goto hwrm_func_qcaps_exit;
9824
9825 flags = le32_to_cpu(resp->flags);
9826 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9827 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9828 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9829 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9830 if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9831 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9832 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9833 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9834 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9835 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9836 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9837 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9838 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9839 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9840 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9841 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9842 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9843 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9844 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9845 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9846
9847 flags_ext = le32_to_cpu(resp->flags_ext);
9848 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9849 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9850 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9851 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9852 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9853 bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9854 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9855 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9856 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9857 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9858 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9859 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9860 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9861 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9862 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9863 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9864 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9865 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9866 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9867 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9868
9869 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9870 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9871 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9872 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9873 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9874 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9875 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9876 if (flags_ext2 &
9877 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9878 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9879 if (BNXT_PF(bp) &&
9880 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9881 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9882
9883 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9884 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9885 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9886 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9887 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9888
9889 bp->tx_push_thresh = 0;
9890 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9891 BNXT_FW_MAJ(bp) > 217)
9892 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9893
9894 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9895 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9896 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9897 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9898 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9899 if (!hw_resc->max_hw_ring_grps)
9900 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9901 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9902 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9903 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9904
9905 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9906 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9907 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9908 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9909 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9910 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9911
9912 if (BNXT_PF(bp)) {
9913 struct bnxt_pf_info *pf = &bp->pf;
9914
9915 pf->fw_fid = le16_to_cpu(resp->fid);
9916 pf->port_id = le16_to_cpu(resp->port_id);
9917 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9918 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9919 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9920 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9921 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9922 bp->flags |= BNXT_FLAG_WOL_CAP;
9923 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9924 bp->fw_cap |= BNXT_FW_CAP_PTP;
9925 } else {
9926 bnxt_ptp_clear(bp);
9927 kfree(bp->ptp_cfg);
9928 bp->ptp_cfg = NULL;
9929 }
9930 } else {
9931 #ifdef CONFIG_BNXT_SRIOV
9932 struct bnxt_vf_info *vf = &bp->vf;
9933
9934 vf->fw_fid = le16_to_cpu(resp->fid);
9935 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9936 #endif
9937 }
9938 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9939
9940 hwrm_func_qcaps_exit:
9941 hwrm_req_drop(bp, req);
9942 return rc;
9943 }
9944
9945 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9946 {
9947 struct hwrm_dbg_qcaps_output *resp;
9948 struct hwrm_dbg_qcaps_input *req;
9949 int rc;
9950
9951 bp->fw_dbg_cap = 0;
9952 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9953 return;
9954
9955 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9956 if (rc)
9957 return;
9958
9959 req->fid = cpu_to_le16(0xffff);
9960 resp = hwrm_req_hold(bp, req);
9961 rc = hwrm_req_send(bp, req);
9962 if (rc)
9963 goto hwrm_dbg_qcaps_exit;
9964
9965 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9966
9967 hwrm_dbg_qcaps_exit:
9968 hwrm_req_drop(bp, req);
9969 }
9970
9971 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9972
9973 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9974 {
9975 int rc;
9976
9977 rc = __bnxt_hwrm_func_qcaps(bp);
9978 if (rc)
9979 return rc;
9980
9981 bnxt_hwrm_dbg_qcaps(bp);
9982
9983 rc = bnxt_hwrm_queue_qportcfg(bp);
9984 if (rc) {
9985 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9986 return rc;
9987 }
9988 if (bp->hwrm_spec_code >= 0x10803) {
9989 rc = bnxt_alloc_ctx_mem(bp);
9990 if (rc)
9991 return rc;
9992 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9993 if (!rc)
9994 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9995 }
9996 return 0;
9997 }
9998
9999 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
10000 {
10001 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
10002 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
10003 u32 flags;
10004 int rc;
10005
10006 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
10007 return 0;
10008
10009 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
10010 if (rc)
10011 return rc;
10012
10013 resp = hwrm_req_hold(bp, req);
10014 rc = hwrm_req_send(bp, req);
10015 if (rc)
10016 goto hwrm_cfa_adv_qcaps_exit;
10017
10018 flags = le32_to_cpu(resp->flags);
10019 if (flags &
10020 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
10021 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
10022
10023 if (flags &
10024 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
10025 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
10026
10027 if (flags &
10028 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
10029 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
10030
10031 hwrm_cfa_adv_qcaps_exit:
10032 hwrm_req_drop(bp, req);
10033 return rc;
10034 }
10035
10036 static int __bnxt_alloc_fw_health(struct bnxt *bp)
10037 {
10038 if (bp->fw_health)
10039 return 0;
10040
10041 bp->fw_health = kzalloc_obj(*bp->fw_health);
10042 if (!bp->fw_health)
10043 return -ENOMEM;
10044
10045 mutex_init(&bp->fw_health->lock);
10046 return 0;
10047 }
10048
10049 static int bnxt_alloc_fw_health(struct bnxt *bp)
10050 {
10051 int rc;
10052
10053 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10054 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10055 return 0;
10056
10057 rc = __bnxt_alloc_fw_health(bp);
10058 if (rc) {
10059 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10060 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10061 return rc;
10062 }
10063
10064 return 0;
10065 }
10066
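/* Re-target the firmware health GRC window at the aligned region containing
 * "reg" (reg & BNXT_GRC_BASE_MASK); callers then read the register through
 * the corresponding window offset instead of the raw GRC address.
 */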
10067 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
10068 {
10069 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
10070 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
10071 BNXT_FW_HEALTH_WIN_MAP_OFF);
10072 }
10073
10074 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
10075 {
10076 struct bnxt_fw_health *fw_health = bp->fw_health;
10077 u32 reg_type;
10078
10079 if (!fw_health)
10080 return;
10081
10082 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
10083 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
10084 fw_health->status_reliable = false;
10085
10086 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
10087 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
10088 fw_health->resets_reliable = false;
10089 }
10090
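/* Locate the firmware status register: look for the hcomm status structure
 * signature through the GRC window and fall back to the fixed P5+ status
 * location if the signature is absent.  GRC-type registers are pre-mapped
 * through the window before the status is marked reliable.
 */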
10091 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
10092 {
10093 void __iomem *hs;
10094 u32 status_loc;
10095 u32 reg_type;
10096 u32 sig;
10097
10098 if (bp->fw_health)
10099 bp->fw_health->status_reliable = false;
10100
10101 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
10102 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10103
10104 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10105 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10106 if (!bp->chip_num) {
10107 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10108 bp->chip_num = readl(bp->bar0 +
10109 BNXT_FW_HEALTH_WIN_BASE +
10110 BNXT_GRC_REG_CHIP_NUM);
10111 }
10112 if (!BNXT_CHIP_P5_PLUS(bp))
10113 return;
10114
10115 status_loc = BNXT_GRC_REG_STATUS_P5 |
10116 BNXT_FW_HEALTH_REG_TYPE_BAR0;
10117 } else {
10118 status_loc = readl(hs + offsetof(struct hcomm_status,
10119 fw_status_loc));
10120 }
10121
10122 if (__bnxt_alloc_fw_health(bp)) {
10123 netdev_warn(bp->dev, "no memory for firmware status checks\n");
10124 return;
10125 }
10126
10127 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10128 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10129 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10130 __bnxt_map_fw_health_reg(bp, status_loc);
10131 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10132 BNXT_FW_HEALTH_WIN_OFF(status_loc);
10133 }
10134
10135 bp->fw_health->status_reliable = true;
10136 }
10137
10138 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10139 {
10140 struct bnxt_fw_health *fw_health = bp->fw_health;
10141 u32 reg_base = 0xffffffff;
10142 int i;
10143
10144 bp->fw_health->status_reliable = false;
10145 bp->fw_health->resets_reliable = false;
10146 /* Only pre-map the monitoring GRC registers using window 3 */
10147 for (i = 0; i < 4; i++) {
10148 u32 reg = fw_health->regs[i];
10149
10150 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10151 continue;
10152 if (reg_base == 0xffffffff)
10153 reg_base = reg & BNXT_GRC_BASE_MASK;
10154 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10155 return -ERANGE;
10156 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10157 }
10158 bp->fw_health->status_reliable = true;
10159 bp->fw_health->resets_reliable = true;
10160 if (reg_base == 0xffffffff)
10161 return 0;
10162
10163 __bnxt_map_fw_health_reg(bp, reg_base);
10164 return 0;
10165 }
10166
10167 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10168 {
10169 if (!bp->fw_health)
10170 return;
10171
10172 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10173 bp->fw_health->status_reliable = true;
10174 bp->fw_health->resets_reliable = true;
10175 } else {
10176 bnxt_try_map_fw_health_reg(bp);
10177 }
10178 }
10179
10180 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10181 {
10182 struct bnxt_fw_health *fw_health = bp->fw_health;
10183 struct hwrm_error_recovery_qcfg_output *resp;
10184 struct hwrm_error_recovery_qcfg_input *req;
10185 int rc, i;
10186
10187 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10188 return 0;
10189
10190 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10191 if (rc)
10192 return rc;
10193
10194 resp = hwrm_req_hold(bp, req);
10195 rc = hwrm_req_send(bp, req);
10196 if (rc)
10197 goto err_recovery_out;
10198 fw_health->flags = le32_to_cpu(resp->flags);
10199 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10200 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10201 rc = -EINVAL;
10202 goto err_recovery_out;
10203 }
10204 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10205 fw_health->master_func_wait_dsecs =
10206 le32_to_cpu(resp->master_func_wait_period);
10207 fw_health->normal_func_wait_dsecs =
10208 le32_to_cpu(resp->normal_func_wait_period);
10209 fw_health->post_reset_wait_dsecs =
10210 le32_to_cpu(resp->master_func_wait_period_after_reset);
10211 fw_health->post_reset_max_wait_dsecs =
10212 le32_to_cpu(resp->max_bailout_time_after_reset);
10213 fw_health->regs[BNXT_FW_HEALTH_REG] =
10214 le32_to_cpu(resp->fw_health_status_reg);
10215 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10216 le32_to_cpu(resp->fw_heartbeat_reg);
10217 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10218 le32_to_cpu(resp->fw_reset_cnt_reg);
10219 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10220 le32_to_cpu(resp->reset_inprogress_reg);
10221 fw_health->fw_reset_inprog_reg_mask =
10222 le32_to_cpu(resp->reset_inprogress_reg_mask);
10223 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10224 if (fw_health->fw_reset_seq_cnt >= 16) {
10225 rc = -EINVAL;
10226 goto err_recovery_out;
10227 }
10228 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10229 fw_health->fw_reset_seq_regs[i] =
10230 le32_to_cpu(resp->reset_reg[i]);
10231 fw_health->fw_reset_seq_vals[i] =
10232 le32_to_cpu(resp->reset_reg_val[i]);
10233 fw_health->fw_reset_seq_delay_msec[i] =
10234 resp->delay_after_reset[i];
10235 }
10236 err_recovery_out:
10237 hwrm_req_drop(bp, req);
10238 if (!rc)
10239 rc = bnxt_map_fw_health_regs(bp);
10240 if (rc)
10241 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10242 return rc;
10243 }
10244
10245 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10246 {
10247 struct hwrm_func_reset_input *req;
10248 int rc;
10249
10250 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10251 if (rc)
10252 return rc;
10253
10254 req->enables = 0;
10255 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10256 return hwrm_req_send(bp, req);
10257 }
10258
10259 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10260 {
10261 struct hwrm_nvm_get_dev_info_output nvm_info;
10262
10263 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10264 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10265 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10266 nvm_info.nvm_cfg_ver_upd);
10267 }
10268
10269 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10270 {
10271 struct hwrm_queue_qportcfg_output *resp;
10272 struct hwrm_queue_qportcfg_input *req;
10273 u8 i, j, *qptr;
10274 bool no_rdma;
10275 int rc = 0;
10276
10277 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10278 if (rc)
10279 return rc;
10280
10281 resp = hwrm_req_hold(bp, req);
10282 rc = hwrm_req_send(bp, req);
10283 if (rc)
10284 goto qportcfg_exit;
10285
10286 if (!resp->max_configurable_queues) {
10287 rc = -EINVAL;
10288 goto qportcfg_exit;
10289 }
10290 bp->max_tc = resp->max_configurable_queues;
10291 bp->max_lltc = resp->max_configurable_lossless_queues;
10292 if (bp->max_tc > BNXT_MAX_QUEUE)
10293 bp->max_tc = BNXT_MAX_QUEUE;
10294
10295 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10296 qptr = &resp->queue_id0;
10297 for (i = 0, j = 0; i < bp->max_tc; i++) {
10298 bp->q_info[j].queue_id = *qptr;
10299 bp->q_ids[i] = *qptr++;
10300 bp->q_info[j].queue_profile = *qptr++;
10301 bp->tc_to_qidx[j] = j;
10302 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10303 (no_rdma && BNXT_PF(bp)))
10304 j++;
10305 }
10306 bp->max_q = bp->max_tc;
10307 bp->max_tc = max_t(u8, j, 1);
10308
10309 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10310 bp->max_tc = 1;
10311
10312 if (bp->max_lltc > bp->max_tc)
10313 bp->max_lltc = bp->max_tc;
10314
10315 qportcfg_exit:
10316 hwrm_req_drop(bp, req);
10317 return rc;
10318 }
10319
10320 static int bnxt_hwrm_poll(struct bnxt *bp)
10321 {
10322 struct hwrm_ver_get_input *req;
10323 int rc;
10324
10325 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10326 if (rc)
10327 return rc;
10328
10329 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10330 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10331 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10332
10333 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10334 rc = hwrm_req_send(bp, req);
10335 return rc;
10336 }
10337
10338 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10339 {
10340 struct hwrm_ver_get_output *resp;
10341 struct hwrm_ver_get_input *req;
10342 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10343 u32 dev_caps_cfg, hwrm_ver;
10344 int rc, len, max_tmo_secs;
10345
10346 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10347 if (rc)
10348 return rc;
10349
10350 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10351 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10352 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10353 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10354 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10355
10356 resp = hwrm_req_hold(bp, req);
10357 rc = hwrm_req_send(bp, req);
10358 if (rc)
10359 goto hwrm_ver_get_exit;
10360
10361 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10362
10363 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10364 resp->hwrm_intf_min_8b << 8 |
10365 resp->hwrm_intf_upd_8b;
10366 if (resp->hwrm_intf_maj_8b < 1) {
10367 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10368 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10369 resp->hwrm_intf_upd_8b);
10370 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10371 }
10372
10373 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10374 HWRM_VERSION_UPDATE;
10375
10376 if (bp->hwrm_spec_code > hwrm_ver)
10377 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10378 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10379 HWRM_VERSION_UPDATE);
10380 else
10381 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10382 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10383 resp->hwrm_intf_upd_8b);
10384
10385 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10386 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10387 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10388 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10389 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10390 len = FW_VER_STR_LEN;
10391 } else {
10392 fw_maj = resp->hwrm_fw_maj_8b;
10393 fw_min = resp->hwrm_fw_min_8b;
10394 fw_bld = resp->hwrm_fw_bld_8b;
10395 fw_rsv = resp->hwrm_fw_rsvd_8b;
10396 len = BC_HWRM_STR_LEN;
10397 }
10398 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10399 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10400 fw_rsv);
10401
10402 if (strlen(resp->active_pkg_name)) {
10403 int fw_ver_len = strlen(bp->fw_ver_str);
10404
10405 snprintf(bp->fw_ver_str + fw_ver_len,
10406 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10407 resp->active_pkg_name);
10408 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10409 }
10410
10411 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10412 if (!bp->hwrm_cmd_timeout)
10413 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10414 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10415 if (!bp->hwrm_cmd_max_timeout)
10416 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10417 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10418 #ifdef CONFIG_DETECT_HUNG_TASK
10419 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10420 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10421 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10422 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10423 }
10424 #endif
10425
10426 if (resp->hwrm_intf_maj_8b >= 1) {
10427 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10428 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10429 }
10430 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10431 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10432
10433 bp->chip_num = le16_to_cpu(resp->chip_num);
10434 bp->chip_rev = resp->chip_rev;
10435 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10436 !resp->chip_metal)
10437 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10438
10439 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10440 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10441 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10442 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10443
10444 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10445 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10446
10447 if (dev_caps_cfg &
10448 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10449 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10450
10451 if (dev_caps_cfg &
10452 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10453 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10454
10455 if (dev_caps_cfg &
10456 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10457 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10458
10459 hwrm_ver_get_exit:
10460 hwrm_req_drop(bp, req);
10461 return rc;
10462 }
10463
10464 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10465 {
10466 struct hwrm_fw_set_time_input *req;
10467 struct tm tm;
10468 time64_t now = ktime_get_real_seconds();
10469 int rc;
10470
10471 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10472 bp->hwrm_spec_code < 0x10400)
10473 return -EOPNOTSUPP;
10474
10475 time64_to_tm(now, 0, &tm);
10476 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10477 if (rc)
10478 return rc;
10479
10480 req->year = cpu_to_le16(1900 + tm.tm_year);
10481 req->month = 1 + tm.tm_mon;
10482 req->day = tm.tm_mday;
10483 req->hour = tm.tm_hour;
10484 req->minute = tm.tm_min;
10485 req->second = tm.tm_sec;
10486 return hwrm_req_send(bp, req);
10487 }
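/*
 * Worked conversion example (added comment): ktime_get_real_seconds()
 * returns wall-clock seconds since the Unix epoch and time64_to_tm() is
 * called with a zero offset, so e.g. 1700000000 becomes
 * 2023-11-14 22:13:20 UTC; tm_year (years since 1900) and tm_mon (0..11)
 * are rebased above before being sent to the firmware.
 */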
10488
10489 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10490 {
10491 u64 sw_tmp;
10492
10493 hw &= mask;
10494 sw_tmp = (*sw & ~mask) | hw;
10495 if (hw < (*sw & mask))
10496 sw_tmp += mask + 1;
10497 WRITE_ONCE(*sw, sw_tmp);
10498 }
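/*
 * Worked example (illustrative comment, not from the original source):
 * with a 48-bit hardware counter, mask = 0xffffffffffff.  If the software
 * counter last held low bits 0xfffffffffff0 and the hardware value has
 * since wrapped to 0x10, then hw < (*sw & mask), so mask + 1 (2^48) is
 * added once and the accumulated 64-bit counter keeps increasing
 * monotonically across the rollover.
 */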
10499
10500 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10501 int count, bool ignore_zero)
10502 {
10503 int i;
10504
10505 for (i = 0; i < count; i++) {
10506 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10507
10508 if (ignore_zero && !hw)
10509 continue;
10510
10511 if (masks[i] == -1ULL)
10512 sw_stats[i] = hw;
10513 else
10514 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10515 }
10516 }
10517
10518 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10519 {
10520 if (!stats->hw_stats)
10521 return;
10522
10523 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10524 stats->hw_masks, stats->len / 8, false);
10525 }
10526
10527 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10528 {
10529 struct bnxt_stats_mem *ring0_stats;
10530 bool ignore_zero = false;
10531 int i;
10532
10533 /* Chip bug. Counter intermittently becomes 0. */
10534 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10535 ignore_zero = true;
10536
10537 for (i = 0; i < bp->cp_nr_rings; i++) {
10538 struct bnxt_napi *bnapi = bp->bnapi[i];
10539 struct bnxt_cp_ring_info *cpr;
10540 struct bnxt_stats_mem *stats;
10541
10542 cpr = &bnapi->cp_ring;
10543 stats = &cpr->stats;
10544 if (!i)
10545 ring0_stats = stats;
10546 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10547 ring0_stats->hw_masks,
10548 ring0_stats->len / 8, ignore_zero);
10549 }
10550 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10551 struct bnxt_stats_mem *stats = &bp->port_stats;
10552 __le64 *hw_stats = stats->hw_stats;
10553 u64 *sw_stats = stats->sw_stats;
10554 u64 *masks = stats->hw_masks;
10555 int cnt;
10556
10557 cnt = sizeof(struct rx_port_stats) / 8;
10558 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10559
10560 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10561 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10562 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10563 cnt = sizeof(struct tx_port_stats) / 8;
10564 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10565 }
10566 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10567 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10568 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10569 }
10570 }
10571
10572 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10573 {
10574 struct hwrm_port_qstats_input *req;
10575 struct bnxt_pf_info *pf = &bp->pf;
10576 int rc;
10577
10578 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10579 return 0;
10580
10581 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10582 return -EOPNOTSUPP;
10583
10584 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10585 if (rc)
10586 return rc;
10587
10588 req->flags = flags;
10589 req->port_id = cpu_to_le16(pf->port_id);
10590 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10591 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10592 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10593 return hwrm_req_send(bp, req);
10594 }
10595
10596 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10597 {
10598 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10599 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10600 struct hwrm_port_qstats_ext_output *resp_qs;
10601 struct hwrm_port_qstats_ext_input *req_qs;
10602 struct bnxt_pf_info *pf = &bp->pf;
10603 u32 tx_stat_size;
10604 int rc;
10605
10606 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10607 return 0;
10608
10609 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10610 return -EOPNOTSUPP;
10611
10612 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10613 if (rc)
10614 return rc;
10615
10616 req_qs->flags = flags;
10617 req_qs->port_id = cpu_to_le16(pf->port_id);
10618 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10619 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10620 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10621 sizeof(struct tx_port_stats_ext) : 0;
10622 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10623 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10624 resp_qs = hwrm_req_hold(bp, req_qs);
10625 rc = hwrm_req_send(bp, req_qs);
10626 if (!rc) {
10627 bp->fw_rx_stats_ext_size =
10628 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10629 if (BNXT_FW_MAJ(bp) < 220 &&
10630 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10631 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10632
10633 bp->fw_tx_stats_ext_size = tx_stat_size ?
10634 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10635 } else {
10636 bp->fw_rx_stats_ext_size = 0;
10637 bp->fw_tx_stats_ext_size = 0;
10638 }
10639 hwrm_req_drop(bp, req_qs);
10640
10641 if (flags)
10642 return rc;
10643
10644 if (bp->fw_tx_stats_ext_size <=
10645 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10646 bp->pri2cos_valid = 0;
10647 return rc;
10648 }
10649
10650 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10651 if (rc)
10652 return rc;
10653
10654 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10655
10656 resp_qc = hwrm_req_hold(bp, req_qc);
10657 rc = hwrm_req_send(bp, req_qc);
10658 if (!rc) {
10659 u8 *pri2cos;
10660 int i, j;
10661
10662 pri2cos = &resp_qc->pri0_cos_queue_id;
10663 for (i = 0; i < 8; i++) {
10664 u8 queue_id = pri2cos[i];
10665 u8 queue_idx;
10666
10667 /* Per port queue IDs start from 0, 10, 20, etc */
10668 queue_idx = queue_id % 10;
10669 if (queue_idx > BNXT_MAX_QUEUE) {
10670 bp->pri2cos_valid = false;
10671 hwrm_req_drop(bp, req_qc);
10672 return rc;
10673 }
10674 for (j = 0; j < bp->max_q; j++) {
10675 if (bp->q_ids[j] == queue_id)
10676 bp->pri2cos_idx[i] = queue_idx;
10677 }
10678 }
10679 bp->pri2cos_valid = true;
10680 }
10681 hwrm_req_drop(bp, req_qc);
10682
10683 return rc;
10684 }
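/*
 * Illustrative note (assumption based on the in-function comment above):
 * firmware numbers per-port CoS queue IDs in strides of 10 (0..9 for the
 * first port, 10..19 for the next, and so on), so queue_id % 10 recovers
 * the per-port queue index, e.g. queue ID 23 maps to index 3.
 */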
10685
10686 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10687 {
10688 bnxt_hwrm_tunnel_dst_port_free(bp,
10689 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10690 bnxt_hwrm_tunnel_dst_port_free(bp,
10691 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10692 }
10693
10694 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10695 {
10696 int rc, i;
10697 u32 tpa_flags = 0;
10698
10699 if (set_tpa)
10700 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10701 else if (BNXT_NO_FW_ACCESS(bp))
10702 return 0;
10703 for (i = 0; i < bp->nr_vnics; i++) {
10704 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10705 if (rc) {
10706 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10707 i, rc);
10708 return rc;
10709 }
10710 }
10711 return 0;
10712 }
10713
10714 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10715 {
10716 int i;
10717
10718 for (i = 0; i < bp->nr_vnics; i++)
10719 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10720 }
10721
10722 static void bnxt_clear_vnic(struct bnxt *bp)
10723 {
10724 if (!bp->vnic_info)
10725 return;
10726
10727 bnxt_hwrm_clear_vnic_filter(bp);
10728 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10729 /* clear all RSS settings before freeing vnic ctx */
10730 bnxt_hwrm_clear_vnic_rss(bp);
10731 bnxt_hwrm_vnic_ctx_free(bp);
10732 }
10733 /* before freeing the vnic, undo the vnic tpa settings */
10734 if (bp->flags & BNXT_FLAG_TPA)
10735 bnxt_set_tpa(bp, false);
10736 bnxt_hwrm_vnic_free(bp);
10737 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10738 bnxt_hwrm_vnic_ctx_free(bp);
10739 }
10740
10741 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10742 bool irq_re_init)
10743 {
10744 bnxt_clear_vnic(bp);
10745 bnxt_hwrm_ring_free(bp, close_path);
10746 bnxt_hwrm_ring_grp_free(bp);
10747 if (irq_re_init) {
10748 bnxt_hwrm_stat_ctx_free(bp);
10749 bnxt_hwrm_free_tunnel_ports(bp);
10750 }
10751 }
10752
10753 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10754 {
10755 struct hwrm_func_cfg_input *req;
10756 u8 evb_mode;
10757 int rc;
10758
10759 if (br_mode == BRIDGE_MODE_VEB)
10760 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10761 else if (br_mode == BRIDGE_MODE_VEPA)
10762 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10763 else
10764 return -EINVAL;
10765
10766 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10767 if (rc)
10768 return rc;
10769
10770 req->fid = cpu_to_le16(0xffff);
10771 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10772 req->evb_mode = evb_mode;
10773 return hwrm_req_send(bp, req);
10774 }
10775
10776 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10777 {
10778 struct hwrm_func_cfg_input *req;
10779 int rc;
10780
10781 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10782 return 0;
10783
10784 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10785 if (rc)
10786 return rc;
10787
10788 req->fid = cpu_to_le16(0xffff);
10789 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10790 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10791 if (size == 128)
10792 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10793
10794 return hwrm_req_send(bp, req);
10795 }
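/*
 * Minimal usage sketch (hedged, not necessarily how this driver's probe
 * path invokes it): a caller could pass the CPU cache line size so the
 * NIC aligns DMA writes accordingly, e.g.:
 *
 *	rc = bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
 *	if (rc)
 *		netdev_warn(bp->dev, "cache line size cfg failed rc: %d\n", rc);
 */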
10796
10797 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10798 {
10799 int rc;
10800
10801 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10802 goto skip_rss_ctx;
10803
10804 /* allocate context for vnic */
10805 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10806 if (rc) {
10807 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10808 vnic->vnic_id, rc);
10809 goto vnic_setup_err;
10810 }
10811 bp->rsscos_nr_ctxs++;
10812
10813 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10814 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10815 if (rc) {
10816 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10817 vnic->vnic_id, rc);
10818 goto vnic_setup_err;
10819 }
10820 bp->rsscos_nr_ctxs++;
10821 }
10822
10823 skip_rss_ctx:
10824 /* configure default vnic, ring grp */
10825 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10826 if (rc) {
10827 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10828 vnic->vnic_id, rc);
10829 goto vnic_setup_err;
10830 }
10831
10832 /* Enable RSS hashing on vnic */
10833 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10834 if (rc) {
10835 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10836 vnic->vnic_id, rc);
10837 goto vnic_setup_err;
10838 }
10839
10840 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10841 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10842 if (rc) {
10843 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10844 vnic->vnic_id, rc);
10845 }
10846 }
10847
10848 vnic_setup_err:
10849 return rc;
10850 }
10851
10852 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10853 u8 valid)
10854 {
10855 struct hwrm_vnic_update_input *req;
10856 int rc;
10857
10858 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10859 if (rc)
10860 return rc;
10861
10862 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10863
10864 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10865 req->mru = cpu_to_le16(vnic->mru);
10866
10867 req->enables = cpu_to_le32(valid);
10868
10869 return hwrm_req_send(bp, req);
10870 }
10871
10872 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10873 {
10874 int rc;
10875
10876 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10877 if (rc) {
10878 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10879 vnic->vnic_id, rc);
10880 return rc;
10881 }
10882 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10883 if (rc)
10884 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10885 vnic->vnic_id, rc);
10886 return rc;
10887 }
10888
10889 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10890 {
10891 int rc, i, nr_ctxs;
10892
10893 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10894 for (i = 0; i < nr_ctxs; i++) {
10895 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10896 if (rc) {
10897 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10898 vnic->vnic_id, i, rc);
10899 break;
10900 }
10901 bp->rsscos_nr_ctxs++;
10902 }
10903 if (i < nr_ctxs)
10904 return -ENOMEM;
10905
10906 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10907 if (rc)
10908 return rc;
10909
10910 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10911 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10912 if (rc) {
10913 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10914 vnic->vnic_id, rc);
10915 }
10916 }
10917 return rc;
10918 }
10919
10920 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10921 {
10922 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10923 return __bnxt_setup_vnic_p5(bp, vnic);
10924 else
10925 return __bnxt_setup_vnic(bp, vnic);
10926 }
10927
10928 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10929 struct bnxt_vnic_info *vnic,
10930 u16 start_rx_ring_idx, int rx_rings)
10931 {
10932 int rc;
10933
10934 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10935 if (rc) {
10936 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10937 vnic->vnic_id, rc);
10938 return rc;
10939 }
10940 return bnxt_setup_vnic(bp, vnic);
10941 }
10942
10943 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10944 {
10945 struct bnxt_vnic_info *vnic;
10946 int i, rc = 0;
10947
10948 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10949 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10950 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10951 }
10952
10953 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10954 return 0;
10955
10956 for (i = 0; i < bp->rx_nr_rings; i++) {
10957 u16 vnic_id = i + 1;
10958 u16 ring_id = i;
10959
10960 if (vnic_id >= bp->nr_vnics)
10961 break;
10962
10963 vnic = &bp->vnic_info[vnic_id];
10964 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10965 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10966 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10967 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10968 break;
10969 }
10970 return rc;
10971 }
10972
10973 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10974 bool all)
10975 {
10976 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10977 struct bnxt_filter_base *usr_fltr, *tmp;
10978 struct bnxt_ntuple_filter *ntp_fltr;
10979 int i;
10980
10981 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10982 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10983 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10984 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10985 }
10986 if (!all)
10987 return;
10988
10989 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10990 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10991 usr_fltr->fw_vnic_id == rss_ctx->index) {
10992 ntp_fltr = container_of(usr_fltr,
10993 struct bnxt_ntuple_filter,
10994 base);
10995 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10996 bnxt_del_ntp_filter(bp, ntp_fltr);
10997 bnxt_del_one_usr_fltr(bp, usr_fltr);
10998 }
10999 }
11000
11001 if (vnic->rss_table)
11002 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
11003 vnic->rss_table,
11004 vnic->rss_table_dma_addr);
11005 bp->num_rss_ctx--;
11006 }
11007
11008 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
11009 int rxr_id)
11010 {
11011 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
11012 int i, vnic_rx;
11013
11014 /* The ntuple VNIC always covers all the rx rings, so any rx ring id
11015  * change must be accounted for here because a future filter may use it.
11016  */
11017 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
11018 return true;
11019
11020 for (i = 0; i < tbl_size; i++) {
11021 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
11022 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
11023 else
11024 vnic_rx = bp->rss_indir_tbl[i];
11025
11026 if (rxr_id == vnic_rx)
11027 return true;
11028 }
11029
11030 return false;
11031 }
11032
11033 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
11034 u16 mru, int rxr_id)
11035 {
11036 int rc;
11037
11038 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
11039 return 0;
11040
11041 if (mru) {
11042 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
11043 if (rc) {
11044 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
11045 vnic->vnic_id, rc);
11046 return rc;
11047 }
11048 }
11049 vnic->mru = mru;
11050 bnxt_hwrm_vnic_update(bp, vnic,
11051 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
11052
11053 return 0;
11054 }
11055
11056 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
11057 {
11058 struct ethtool_rxfh_context *ctx;
11059 unsigned long context;
11060 int rc;
11061
11062 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11063 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11064 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
11065
11066 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
11067 if (rc)
11068 return rc;
11069 }
11070
11071 return 0;
11072 }
11073
11074 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
11075 {
11076 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
11077 struct ethtool_rxfh_context *ctx;
11078 unsigned long context;
11079
11080 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11081 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11082 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
11083
11084 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
11085 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
11086 __bnxt_setup_vnic_p5(bp, vnic)) {
11087 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
11088 rss_ctx->index);
11089 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
11090 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
11091 }
11092 }
11093 }
11094
11095 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
11096 {
11097 struct ethtool_rxfh_context *ctx;
11098 unsigned long context;
11099
11100 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11101 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11102
11103 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11104 }
11105 }
11106
11107 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11108 static bool bnxt_promisc_ok(struct bnxt *bp)
11109 {
11110 #ifdef CONFIG_BNXT_SRIOV
11111 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11112 return false;
11113 #endif
11114 return true;
11115 }
11116
11117 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11118 {
11119 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11120 unsigned int rc = 0;
11121
11122 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11123 if (rc) {
11124 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11125 rc);
11126 return rc;
11127 }
11128
11129 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11130 if (rc) {
11131 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11132 rc);
11133 return rc;
11134 }
11135 return rc;
11136 }
11137
11138 static int bnxt_cfg_rx_mode(struct bnxt *, struct netdev_hw_addr_list *, bool);
11139 static bool bnxt_mc_list_updated(struct bnxt *, u32 *,
11140 const struct netdev_hw_addr_list *);
11141
11142 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11143 {
11144 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11145 int rc = 0;
11146 unsigned int rx_nr_rings = bp->rx_nr_rings;
11147
11148 if (irq_re_init) {
11149 rc = bnxt_hwrm_stat_ctx_alloc(bp);
11150 if (rc) {
11151 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11152 rc);
11153 goto err_out;
11154 }
11155 }
11156
11157 rc = bnxt_hwrm_ring_alloc(bp);
11158 if (rc) {
11159 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11160 goto err_out;
11161 }
11162
11163 rc = bnxt_hwrm_ring_grp_alloc(bp);
11164 if (rc) {
11165 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11166 goto err_out;
11167 }
11168
11169 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11170 rx_nr_rings--;
11171
11172 /* default vnic 0 */
11173 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11174 if (rc) {
11175 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11176 goto err_out;
11177 }
11178
11179 if (BNXT_VF(bp))
11180 bnxt_hwrm_func_qcfg(bp);
11181
11182 rc = bnxt_setup_vnic(bp, vnic);
11183 if (rc)
11184 goto err_out;
11185 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11186 bnxt_hwrm_update_rss_hash_cfg(bp);
11187
11188 if (bp->flags & BNXT_FLAG_RFS) {
11189 rc = bnxt_alloc_rfs_vnics(bp);
11190 if (rc)
11191 goto err_out;
11192 }
11193
11194 if (bp->flags & BNXT_FLAG_TPA) {
11195 rc = bnxt_set_tpa(bp, true);
11196 if (rc)
11197 goto err_out;
11198 }
11199
11200 if (BNXT_VF(bp))
11201 bnxt_update_vf_mac(bp);
11202
11203 /* Filter for default vnic 0 */
11204 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11205 if (rc) {
11206 if (BNXT_VF(bp) && rc == -ENODEV)
11207 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11208 else
11209 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11210 goto err_out;
11211 }
11212 vnic->uc_filter_count = 1;
11213
11214 vnic->rx_mask = 0;
11215 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11216 goto skip_rx_mask;
11217
11218 if (bp->dev->flags & IFF_BROADCAST)
11219 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11220
11221 if (bp->dev->flags & IFF_PROMISC)
11222 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11223
11224 if (bp->dev->flags & IFF_ALLMULTI) {
11225 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11226 vnic->mc_list_count = 0;
11227 } else if (bp->dev->flags & IFF_MULTICAST) {
11228 u32 mask = 0;
11229
11230 bnxt_mc_list_updated(bp, &mask, &bp->dev->mc);
11231 vnic->rx_mask |= mask;
11232 }
11233
11234 rc = bnxt_cfg_rx_mode(bp, &bp->dev->uc, true);
11235 if (rc)
11236 goto err_out;
11237
11238 skip_rx_mask:
11239 rc = bnxt_hwrm_set_coal(bp);
11240 if (rc)
11241 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11242 rc);
11243
11244 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11245 rc = bnxt_setup_nitroa0_vnic(bp);
11246 if (rc)
11247 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11248 rc);
11249 }
11250
11251 if (BNXT_VF(bp)) {
11252 bnxt_hwrm_func_qcfg(bp);
11253 netdev_update_features(bp->dev);
11254 }
11255
11256 return 0;
11257
11258 err_out:
11259 bnxt_hwrm_resource_free(bp, 0, true);
11260
11261 return rc;
11262 }
11263
11264 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11265 {
11266 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11267 return 0;
11268 }
11269
11270 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11271 {
11272 bnxt_init_cp_rings(bp);
11273 bnxt_init_rx_rings(bp);
11274 bnxt_init_tx_rings(bp);
11275 bnxt_init_ring_grps(bp, irq_re_init);
11276 bnxt_init_vnics(bp);
11277
11278 return bnxt_init_chip(bp, irq_re_init);
11279 }
11280
11281 static int bnxt_set_real_num_queues(struct bnxt *bp)
11282 {
11283 int rc;
11284 struct net_device *dev = bp->dev;
11285
11286 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11287 bp->tx_nr_rings_xdp);
11288 if (rc)
11289 return rc;
11290
11291 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11292 if (rc)
11293 return rc;
11294
11295 #ifdef CONFIG_RFS_ACCEL
11296 if (bp->flags & BNXT_FLAG_RFS)
11297 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11298 #endif
11299
11300 return rc;
11301 }
11302
11303 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11304 bool shared)
11305 {
11306 int _rx = *rx, _tx = *tx;
11307
11308 if (shared) {
11309 *rx = min_t(int, _rx, max);
11310 *tx = min_t(int, _tx, max);
11311 } else {
11312 if (max < 2)
11313 return -ENOMEM;
11314
11315 while (_rx + _tx > max) {
11316 if (_rx > _tx && _rx > 1)
11317 _rx--;
11318 else if (_tx > 1)
11319 _tx--;
11320 }
11321 *rx = _rx;
11322 *tx = _tx;
11323 }
11324 return 0;
11325 }
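/*
 * Illustrative example (assumed values): with max = 8 completion rings,
 * rx = 6 and tx = 6:
 *   - shared rings: each RX/TX pair shares a completion ring, so rx and
 *     tx are simply clamped to min(6, 8) = 6.
 *   - separate rings: rx + tx must fit within 8, so the larger count is
 *     decremented alternately until the loop settles at rx = 4, tx = 4.
 */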
11326
11327 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11328 {
11329 return (tx - tx_xdp) / tx_sets + tx_xdp;
11330 }
11331
11332 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11333 {
11334 int tcs = bp->num_tc;
11335
11336 if (!tcs)
11337 tcs = 1;
11338 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11339 }
11340
11341 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11342 {
11343 int tcs = bp->num_tc;
11344
11345 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11346 bp->tx_nr_rings_xdp;
11347 }
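/*
 * Hedged arithmetic example (assumed values): with 2 traffic classes
 * (tx_sets = 2) and 4 XDP TX rings, 12 TX rings map to
 * (12 - 4) / 2 + 4 = 8 completion rings, and the inverse above,
 * (8 - 4) * 2 + 4 = 12, recovers the TX ring count.
 */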
11348
11349 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11350 bool sh)
11351 {
11352 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11353
11354 if (tx_cp != *tx) {
11355 int tx_saved = tx_cp, rc;
11356
11357 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11358 if (rc)
11359 return rc;
11360 if (tx_cp != tx_saved)
11361 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11362 return 0;
11363 }
11364 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11365 }
11366
11367 static void bnxt_setup_msix(struct bnxt *bp)
11368 {
11369 const int len = sizeof(bp->irq_tbl[0].name);
11370 struct net_device *dev = bp->dev;
11371 int tcs, i;
11372
11373 tcs = bp->num_tc;
11374 if (tcs) {
11375 int i, off, count;
11376
11377 for (i = 0; i < tcs; i++) {
11378 count = bp->tx_nr_rings_per_tc;
11379 off = BNXT_TC_TO_RING_BASE(bp, i);
11380 netdev_set_tc_queue(dev, i, count, off);
11381 }
11382 }
11383
11384 for (i = 0; i < bp->cp_nr_rings; i++) {
11385 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11386 char *attr;
11387
11388 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11389 attr = "TxRx";
11390 else if (i < bp->rx_nr_rings)
11391 attr = "rx";
11392 else
11393 attr = "tx";
11394
11395 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11396 attr, i);
11397 bp->irq_tbl[map_idx].handler = bnxt_msix;
11398 }
11399 }
11400
11401 static int bnxt_init_int_mode(struct bnxt *bp);
11402
11403 static int bnxt_change_msix(struct bnxt *bp, int total)
11404 {
11405 struct msi_map map;
11406 int i;
11407
11408 /* add MSIX to the end if needed */
11409 for (i = bp->total_irqs; i < total; i++) {
11410 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11411 if (map.index < 0)
11412 return bp->total_irqs;
11413 bp->irq_tbl[i].vector = map.virq;
11414 bp->total_irqs++;
11415 }
11416
11417 /* trim MSIX from the end if needed */
11418 for (i = bp->total_irqs; i > total; i--) {
11419 map.index = i - 1;
11420 map.virq = bp->irq_tbl[i - 1].vector;
11421 pci_msix_free_irq(bp->pdev, map);
11422 bp->total_irqs--;
11423 }
11424 return bp->total_irqs;
11425 }
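/*
 * Descriptive note: bnxt_change_msix() grows or shrinks the MSI-X vector
 * set dynamically and returns the resulting bp->total_irqs; callers
 * compare the return value against the requested total to detect a
 * partial allocation (see bnxt_reserve_rings() below).
 */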
11426
11427 static int bnxt_setup_int_mode(struct bnxt *bp)
11428 {
11429 int rc;
11430
11431 if (!bp->irq_tbl) {
11432 rc = bnxt_init_int_mode(bp);
11433 if (rc || !bp->irq_tbl)
11434 return rc ?: -ENODEV;
11435 }
11436
11437 bnxt_setup_msix(bp);
11438
11439 rc = bnxt_set_real_num_queues(bp);
11440 return rc;
11441 }
11442
11443 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11444 {
11445 return bp->hw_resc.max_rsscos_ctxs;
11446 }
11447
11448 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11449 {
11450 return bp->hw_resc.max_vnics;
11451 }
11452
11453 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11454 {
11455 return bp->hw_resc.max_stat_ctxs;
11456 }
11457
11458 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11459 {
11460 return bp->hw_resc.max_cp_rings;
11461 }
11462
11463 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11464 {
11465 unsigned int cp = bp->hw_resc.max_cp_rings;
11466
11467 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11468 cp -= bnxt_get_ulp_msix_num(bp);
11469
11470 return cp;
11471 }
11472
11473 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11474 {
11475 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11476
11477 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11478 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11479
11480 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11481 }
11482
11483 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11484 {
11485 bp->hw_resc.max_irqs = max_irqs;
11486 }
11487
11488 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11489 {
11490 unsigned int cp;
11491
11492 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11493 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11494 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11495 else
11496 return cp - bp->cp_nr_rings;
11497 }
11498
11499 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11500 {
11501 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11502 }
11503
11504 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11505 {
11506 int max_irq = bnxt_get_max_func_irqs(bp);
11507 int total_req = bp->cp_nr_rings + num;
11508
11509 if (max_irq < total_req) {
11510 num = max_irq - bp->cp_nr_rings;
11511 if (num <= 0)
11512 return 0;
11513 }
11514 return num;
11515 }
11516
11517 static int bnxt_get_num_msix(struct bnxt *bp)
11518 {
11519 if (!BNXT_NEW_RM(bp))
11520 return bnxt_get_max_func_irqs(bp);
11521
11522 return bnxt_nq_rings_in_use(bp);
11523 }
11524
11525 static int bnxt_init_int_mode(struct bnxt *bp)
11526 {
11527 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11528
11529 total_vecs = bnxt_get_num_msix(bp);
11530 max = bnxt_get_max_func_irqs(bp);
11531 if (total_vecs > max)
11532 total_vecs = max;
11533
11534 if (!total_vecs)
11535 return 0;
11536
11537 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11538 min = 2;
11539
11540 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11541 PCI_IRQ_MSIX);
11542 ulp_msix = bnxt_get_ulp_msix_num(bp);
11543 if (total_vecs < 0 || total_vecs < ulp_msix) {
11544 rc = -ENODEV;
11545 goto msix_setup_exit;
11546 }
11547
11548 tbl_size = total_vecs;
11549 if (pci_msix_can_alloc_dyn(bp->pdev))
11550 tbl_size = max;
11551 bp->irq_tbl = kzalloc_objs(*bp->irq_tbl, tbl_size);
11552 if (bp->irq_tbl) {
11553 for (i = 0; i < total_vecs; i++)
11554 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11555
11556 bp->total_irqs = total_vecs;
11557 /* Trim rings based upon num of vectors allocated */
11558 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11559 total_vecs - ulp_msix, min == 1);
11560 if (rc)
11561 goto msix_setup_exit;
11562
11563 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11564 bp->cp_nr_rings = (min == 1) ?
11565 max_t(int, tx_cp, bp->rx_nr_rings) :
11566 tx_cp + bp->rx_nr_rings;
11567
11568 } else {
11569 rc = -ENOMEM;
11570 goto msix_setup_exit;
11571 }
11572 return 0;
11573
11574 msix_setup_exit:
11575 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11576 kfree(bp->irq_tbl);
11577 bp->irq_tbl = NULL;
11578 pci_free_irq_vectors(bp->pdev);
11579 return rc;
11580 }
11581
11582 static void bnxt_clear_int_mode(struct bnxt *bp)
11583 {
11584 pci_free_irq_vectors(bp->pdev);
11585
11586 kfree(bp->irq_tbl);
11587 bp->irq_tbl = NULL;
11588 }
11589
11590 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11591 {
11592 struct bnxt_en_dev *edev = bp->edev[BNXT_AUXDEV_RDMA];
11593 bool irq_cleared = false;
11594 bool irq_change = false;
11595 int tcs = bp->num_tc;
11596 int irqs_required;
11597 int rc;
11598
11599 if (!bnxt_need_reserve_rings(bp))
11600 return 0;
11601
11602 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(edev)) {
11603 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11604
11605 if (ulp_msix > bp->ulp_num_msix_want)
11606 ulp_msix = bp->ulp_num_msix_want;
11607 irqs_required = ulp_msix + bp->cp_nr_rings;
11608 } else {
11609 irqs_required = bnxt_get_num_msix(bp);
11610 }
11611
11612 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11613 irq_change = true;
11614 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11615 bnxt_ulp_irq_stop(bp);
11616 bnxt_clear_int_mode(bp);
11617 irq_cleared = true;
11618 }
11619 }
11620 rc = __bnxt_reserve_rings(bp);
11621 if (irq_cleared) {
11622 if (!rc)
11623 rc = bnxt_init_int_mode(bp);
11624 bnxt_ulp_irq_restart(bp, rc);
11625 } else if (irq_change && !rc) {
11626 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11627 rc = -ENOSPC;
11628 }
11629 if (rc) {
11630 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11631 return rc;
11632 }
11633 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11634 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11635 netdev_err(bp->dev, "tx ring reservation failure\n");
11636 netdev_reset_tc(bp->dev);
11637 bp->num_tc = 0;
11638 if (bp->tx_nr_rings_xdp)
11639 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11640 else
11641 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11642 return -ENOMEM;
11643 }
11644 return 0;
11645 }
11646
11647 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11648 {
11649 struct bnxt_tx_ring_info *txr;
11650 struct netdev_queue *txq;
11651 struct bnxt_napi *bnapi;
11652 int i;
11653
11654 bnapi = bp->bnapi[idx];
11655 bnxt_for_each_napi_tx(i, bnapi, txr) {
11656 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11657 synchronize_net();
11658
11659 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11660 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11661 if (txq) {
11662 __netif_tx_lock_bh(txq);
11663 netif_tx_stop_queue(txq);
11664 __netif_tx_unlock_bh(txq);
11665 }
11666 }
11667
11668 if (!bp->tph_mode)
11669 continue;
11670
11671 bnxt_hwrm_tx_ring_free(bp, txr, true);
11672 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11673 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11674 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11675 }
11676 }
11677
11678 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11679 {
11680 struct bnxt_tx_ring_info *txr;
11681 struct netdev_queue *txq;
11682 struct bnxt_napi *bnapi;
11683 int rc, i;
11684
11685 bnapi = bp->bnapi[idx];
11686 /* All rings have been reserved and previously allocated.
11687 * Reallocating with the same parameters should never fail.
11688 */
11689 bnxt_for_each_napi_tx(i, bnapi, txr) {
11690 if (!bp->tph_mode)
11691 goto start_tx;
11692
11693 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11694 if (rc)
11695 return rc;
11696
11697 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11698 if (rc)
11699 return rc;
11700
11701 txr->tx_prod = 0;
11702 txr->tx_cons = 0;
11703 txr->tx_hw_cons = 0;
11704 start_tx:
11705 WRITE_ONCE(txr->dev_state, 0);
11706 synchronize_net();
11707
11708 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11709 continue;
11710
11711 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11712 if (txq)
11713 netif_tx_start_queue(txq);
11714 }
11715
11716 return 0;
11717 }
11718
11719 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11720 const cpumask_t *mask)
11721 {
11722 struct bnxt_irq *irq;
11723 u16 tag;
11724 int err;
11725
11726 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11727
11728 if (!irq->bp->tph_mode)
11729 return;
11730
11731 cpumask_copy(irq->cpu_mask, mask);
11732
11733 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11734 return;
11735
11736 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11737 cpumask_first(irq->cpu_mask), &tag))
11738 return;
11739
11740 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11741 return;
11742
11743 netdev_lock(irq->bp->dev);
11744 if (netif_running(irq->bp->dev)) {
11745 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11746 if (err)
11747 netdev_err(irq->bp->dev,
11748 "RX queue restart failed: err=%d\n", err);
11749 }
11750 netdev_unlock(irq->bp->dev);
11751 }
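/*
 * Flow summary (descriptive comment added for clarity): when IRQ affinity
 * changes and TPH mode is active, the first CPU in the new mask is
 * translated to a steering tag, the MSI-X entry's steering-tag table slot
 * is rewritten, and the corresponding RX queue is restarted so subsequent
 * DMA writes are steered toward the new CPU's cache.
 */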
11752
11753 static void bnxt_irq_affinity_release(struct kref *ref)
11754 {
11755 struct irq_affinity_notify *notify =
11756 container_of(ref, struct irq_affinity_notify, kref);
11757 struct bnxt_irq *irq;
11758
11759 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11760
11761 if (!irq->bp->tph_mode)
11762 return;
11763
11764 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11765 netdev_err(irq->bp->dev,
11766 "Setting ST=0 for MSIX entry %d failed\n",
11767 irq->msix_nr);
11768 return;
11769 }
11770 }
11771
11772 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11773 {
11774 irq_set_affinity_notifier(irq->vector, NULL);
11775 }
11776
11777 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11778 {
11779 struct irq_affinity_notify *notify;
11780
11781 irq->bp = bp;
11782
11783 /* Nothing to do if TPH is not enabled */
11784 if (!bp->tph_mode)
11785 return;
11786
11787 /* Register IRQ affinity notifier */
11788 notify = &irq->affinity_notify;
11789 notify->irq = irq->vector;
11790 notify->notify = bnxt_irq_affinity_notify;
11791 notify->release = bnxt_irq_affinity_release;
11792
11793 irq_set_affinity_notifier(irq->vector, notify);
11794 }
11795
11796 static void bnxt_free_irq(struct bnxt *bp)
11797 {
11798 struct bnxt_irq *irq;
11799 int i;
11800
11801 #ifdef CONFIG_RFS_ACCEL
11802 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11803 bp->dev->rx_cpu_rmap = NULL;
11804 #endif
11805 if (!bp->irq_tbl || !bp->bnapi)
11806 return;
11807
11808 for (i = 0; i < bp->cp_nr_rings; i++) {
11809 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11810
11811 irq = &bp->irq_tbl[map_idx];
11812 if (irq->requested) {
11813 if (irq->have_cpumask) {
11814 irq_update_affinity_hint(irq->vector, NULL);
11815 free_cpumask_var(irq->cpu_mask);
11816 irq->have_cpumask = 0;
11817 }
11818
11819 bnxt_release_irq_notifier(irq);
11820
11821 free_irq(irq->vector, bp->bnapi[i]);
11822 }
11823
11824 irq->requested = 0;
11825 }
11826
11827 /* Disable TPH support */
11828 pcie_disable_tph(bp->pdev);
11829 bp->tph_mode = 0;
11830 }
11831
11832 static int bnxt_request_irq(struct bnxt *bp)
11833 {
11834 struct cpu_rmap *rmap = NULL;
11835 int i, j, rc = 0;
11836 unsigned long flags = 0;
11837
11838 rc = bnxt_setup_int_mode(bp);
11839 if (rc) {
11840 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11841 rc);
11842 return rc;
11843 }
11844 #ifdef CONFIG_RFS_ACCEL
11845 rmap = bp->dev->rx_cpu_rmap;
11846 #endif
11847
11848 /* Enable TPH support as part of IRQ request */
11849 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11850 if (!rc)
11851 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11852
11853 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11854 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11855 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11856
11857 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11858 rmap && bp->bnapi[i]->rx_ring) {
11859 rc = irq_cpu_rmap_add(rmap, irq->vector);
11860 if (rc)
11861 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11862 j);
11863 j++;
11864 }
11865
11866 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11867 bp->bnapi[i]);
11868 if (rc)
11869 break;
11870
11871 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11872 irq->requested = 1;
11873
11874 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11875 int numa_node = dev_to_node(&bp->pdev->dev);
11876 u16 tag;
11877
11878 irq->have_cpumask = 1;
11879 irq->msix_nr = map_idx;
11880 irq->ring_nr = i;
11881 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11882 irq->cpu_mask);
11883 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11884 if (rc) {
11885 netdev_warn(bp->dev,
11886 "Update affinity hint failed, IRQ = %d\n",
11887 irq->vector);
11888 break;
11889 }
11890
11891 bnxt_register_irq_notifier(bp, irq);
11892
11893 /* Init ST table entry */
11894 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11895 cpumask_first(irq->cpu_mask),
11896 &tag))
11897 continue;
11898
11899 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11900 }
11901 }
11902 return rc;
11903 }
11904
11905 static void bnxt_del_napi(struct bnxt *bp)
11906 {
11907 int i;
11908
11909 if (!bp->bnapi)
11910 return;
11911
11912 for (i = 0; i < bp->rx_nr_rings; i++)
11913 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11914 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11915 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11916
11917 for (i = 0; i < bp->cp_nr_rings; i++) {
11918 struct bnxt_napi *bnapi = bp->bnapi[i];
11919
11920 __netif_napi_del_locked(&bnapi->napi);
11921 }
11922 /* We called __netif_napi_del_locked(), so we need to respect
11923  * an RCU grace period before freeing the napi structures.
11924  */
11925 synchronize_net();
11926 }
11927
11928 static void bnxt_init_napi(struct bnxt *bp)
11929 {
11930 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11931 unsigned int cp_nr_rings = bp->cp_nr_rings;
11932 struct bnxt_napi *bnapi;
11933 int i;
11934
11935 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11936 poll_fn = bnxt_poll_p5;
11937 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11938 cp_nr_rings--;
11939
11940 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11941
11942 for (i = 0; i < cp_nr_rings; i++) {
11943 bnapi = bp->bnapi[i];
11944 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11945 bnapi->index);
11946 }
11947 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11948 bnapi = bp->bnapi[cp_nr_rings];
11949 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11950 }
11951 }
11952
11953 static void bnxt_disable_napi(struct bnxt *bp)
11954 {
11955 int i;
11956
11957 if (!bp->bnapi ||
11958 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11959 return;
11960
11961 for (i = 0; i < bp->cp_nr_rings; i++) {
11962 struct bnxt_napi *bnapi = bp->bnapi[i];
11963 struct bnxt_cp_ring_info *cpr;
11964
11965 cpr = &bnapi->cp_ring;
11966 if (bnapi->tx_fault)
11967 cpr->sw_stats->tx.tx_resets++;
11968 if (bnapi->in_reset)
11969 cpr->sw_stats->rx.rx_resets++;
11970 napi_disable_locked(&bnapi->napi);
11971 }
11972 }
11973
11974 static void bnxt_enable_napi(struct bnxt *bp)
11975 {
11976 int i;
11977
11978 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11979 for (i = 0; i < bp->cp_nr_rings; i++) {
11980 struct bnxt_napi *bnapi = bp->bnapi[i];
11981 struct bnxt_cp_ring_info *cpr;
11982
11983 bnapi->tx_fault = 0;
11984
11985 cpr = &bnapi->cp_ring;
11986 bnapi->in_reset = false;
11987
11988 if (bnapi->rx_ring) {
11989 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11990 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11991 }
11992 napi_enable_locked(&bnapi->napi);
11993 }
11994 }
11995
11996 void bnxt_tx_disable(struct bnxt *bp)
11997 {
11998 int i;
11999 struct bnxt_tx_ring_info *txr;
12000
12001 if (bp->tx_ring) {
12002 for (i = 0; i < bp->tx_nr_rings; i++) {
12003 txr = &bp->tx_ring[i];
12004 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
12005 }
12006 }
12007 /* Make sure napi polls see @dev_state change */
12008 synchronize_net();
12009 /* Drop carrier first to prevent TX timeout */
12010 netif_carrier_off(bp->dev);
12011 /* Stop all TX queues */
12012 netif_tx_disable(bp->dev);
12013 }
12014
12015 void bnxt_tx_enable(struct bnxt *bp)
12016 {
12017 int i;
12018 struct bnxt_tx_ring_info *txr;
12019
12020 for (i = 0; i < bp->tx_nr_rings; i++) {
12021 txr = &bp->tx_ring[i];
12022 WRITE_ONCE(txr->dev_state, 0);
12023 }
12024 /* Make sure napi polls see @dev_state change */
12025 synchronize_net();
12026 netif_tx_wake_all_queues(bp->dev);
12027 if (BNXT_LINK_IS_UP(bp))
12028 netif_carrier_on(bp->dev);
12029 }
12030
12031 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
12032 {
12033 u8 active_fec = link_info->active_fec_sig_mode &
12034 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
12035
12036 switch (active_fec) {
12037 default:
12038 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
12039 return "None";
12040 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
12041 return "Clause 74 BaseR";
12042 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
12043 return "Clause 91 RS(528,514)";
12044 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
12045 return "Clause 91 RS544_1XN";
12046 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
12047 return "Clause 91 RS(544,514)";
12048 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
12049 return "Clause 91 RS272_1XN";
12050 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
12051 return "Clause 91 RS(272,257)";
12052 }
12053 }
12054
12055 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
12056 {
12057 u8 reason = link_info->link_down_reason;
12058
12059 /* Multiple bits can be set; we report only one bit, in order of
12060  * priority.
12061  */
12062 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
12063 return "(Remote fault)";
12064 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
12065 return "(OTP Speed limit violation)";
12066 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
12067 return "(Cable removed)";
12068 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
12069 return "(Module fault)";
12070 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
12071 return "(BMC request down)";
12072 return "";
12073 }
12074
12075 void bnxt_report_link(struct bnxt *bp)
12076 {
12077 if (BNXT_LINK_IS_UP(bp)) {
12078 const char *signal = "";
12079 const char *flow_ctrl;
12080 const char *duplex;
12081 u32 speed;
12082 u16 fec;
12083
12084 netif_carrier_on(bp->dev);
12085 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
12086 if (speed == SPEED_UNKNOWN) {
12087 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
12088 return;
12089 }
12090 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
12091 duplex = "full";
12092 else
12093 duplex = "half";
12094 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
12095 flow_ctrl = "ON - receive & transmit";
12096 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
12097 flow_ctrl = "ON - transmit";
12098 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
12099 flow_ctrl = "ON - receive";
12100 else
12101 flow_ctrl = "none";
12102 if (bp->link_info.phy_qcfg_resp.option_flags &
12103 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
12104 u8 sig_mode = bp->link_info.active_fec_sig_mode &
12105 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12106 switch (sig_mode) {
12107 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12108 signal = "(NRZ) ";
12109 break;
12110 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12111 signal = "(PAM4 56Gbps) ";
12112 break;
12113 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12114 signal = "(PAM4 112Gbps) ";
12115 break;
12116 default:
12117 break;
12118 }
12119 }
12120 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12121 speed, signal, duplex, flow_ctrl);
12122 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12123 netdev_info(bp->dev, "EEE is %s\n",
12124 bp->eee.eee_active ? "active" :
12125 "not active");
12126 fec = bp->link_info.fec_cfg;
12127 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12128 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12129 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12130 bnxt_report_fec(&bp->link_info));
12131 } else {
12132 char *str = bnxt_link_down_reason(&bp->link_info);
12133
12134 netif_carrier_off(bp->dev);
12135 netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12136 }
12137 }
12138
12139 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12140 {
12141 if (!resp->supported_speeds_auto_mode &&
12142 !resp->supported_speeds_force_mode &&
12143 !resp->supported_pam4_speeds_auto_mode &&
12144 !resp->supported_pam4_speeds_force_mode &&
12145 !resp->supported_speeds2_auto_mode &&
12146 !resp->supported_speeds2_force_mode)
12147 return true;
12148 return false;
12149 }
12150
12151 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12152 {
12153 struct bnxt_link_info *link_info = &bp->link_info;
12154 struct hwrm_port_phy_qcaps_output *resp;
12155 struct hwrm_port_phy_qcaps_input *req;
12156 int rc = 0;
12157
12158 if (bp->hwrm_spec_code < 0x10201)
12159 return 0;
12160
12161 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12162 if (rc)
12163 return rc;
12164
12165 resp = hwrm_req_hold(bp, req);
12166 rc = hwrm_req_send(bp, req);
12167 if (rc)
12168 goto hwrm_phy_qcaps_exit;
12169
12170 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12171 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12172 struct ethtool_keee *eee = &bp->eee;
12173 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12174
12175 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12176 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12177 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12178 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12179 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12180 }
12181
12182 if (bp->hwrm_spec_code >= 0x10a01) {
12183 if (bnxt_phy_qcaps_no_speed(resp)) {
12184 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12185 netdev_warn(bp->dev, "Ethernet link disabled\n");
12186 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12187 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12188 netdev_info(bp->dev, "Ethernet link enabled\n");
12189 /* Phy re-enabled, reprobe the speeds */
12190 link_info->support_auto_speeds = 0;
12191 link_info->support_pam4_auto_speeds = 0;
12192 link_info->support_auto_speeds2 = 0;
12193 }
12194 }
12195 if (resp->supported_speeds_auto_mode)
12196 link_info->support_auto_speeds =
12197 le16_to_cpu(resp->supported_speeds_auto_mode);
12198 if (resp->supported_pam4_speeds_auto_mode)
12199 link_info->support_pam4_auto_speeds =
12200 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12201 if (resp->supported_speeds2_auto_mode)
12202 link_info->support_auto_speeds2 =
12203 le16_to_cpu(resp->supported_speeds2_auto_mode);
12204
12205 bp->port_count = resp->port_cnt;
12206
12207 hwrm_phy_qcaps_exit:
12208 hwrm_req_drop(bp, req);
12209 return rc;
12210 }
12211
12212 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12213 {
12214 struct hwrm_port_mac_qcaps_output *resp;
12215 struct hwrm_port_mac_qcaps_input *req;
12216 int rc;
12217
12218 if (bp->hwrm_spec_code < 0x10a03)
12219 return;
12220
12221 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12222 if (rc)
12223 return;
12224
12225 resp = hwrm_req_hold(bp, req);
12226 rc = hwrm_req_send_silent(bp, req);
12227 if (!rc)
12228 bp->mac_flags = resp->flags;
12229 hwrm_req_drop(bp, req);
12230 }
12231
12232 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12233 {
12234 u16 diff = advertising ^ supported;
12235
12236 return ((supported | diff) != supported);
12237 }
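/*
 * Illustrative bitmask example (added comment, values assumed): if the
 * driver advertises 0b0111 but the PHY now only supports 0b0011, then
 * diff = 0b0100 and (supported | diff) != supported, so the stale
 * advertisement is detected and the caller resets it to the supported
 * mask.
 */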
12238
12239 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12240 {
12241 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12242
12243 /* Check if any advertised speeds are no longer supported. The caller
12244 * holds the link_lock mutex, so we can modify link_info settings.
12245 */
12246 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12247 if (bnxt_support_dropped(link_info->advertising,
12248 link_info->support_auto_speeds2)) {
12249 link_info->advertising = link_info->support_auto_speeds2;
12250 return true;
12251 }
12252 return false;
12253 }
12254 if (bnxt_support_dropped(link_info->advertising,
12255 link_info->support_auto_speeds)) {
12256 link_info->advertising = link_info->support_auto_speeds;
12257 return true;
12258 }
12259 if (bnxt_support_dropped(link_info->advertising_pam4,
12260 link_info->support_pam4_auto_speeds)) {
12261 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12262 return true;
12263 }
12264 return false;
12265 }
12266
12267 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12268 {
12269 struct bnxt_link_info *link_info = &bp->link_info;
12270 struct hwrm_port_phy_qcfg_output *resp;
12271 struct hwrm_port_phy_qcfg_input *req;
12272 u8 link_state = link_info->link_state;
12273 bool support_changed;
12274 int rc;
12275
12276 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12277 if (rc)
12278 return rc;
12279
12280 resp = hwrm_req_hold(bp, req);
12281 rc = hwrm_req_send(bp, req);
12282 if (rc) {
12283 hwrm_req_drop(bp, req);
12284 if (BNXT_VF(bp) && rc == -ENODEV) {
12285 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12286 rc = 0;
12287 }
12288 return rc;
12289 }
12290
12291 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12292 link_info->phy_link_status = resp->link;
12293 link_info->duplex = resp->duplex_cfg;
12294 if (bp->hwrm_spec_code >= 0x10800)
12295 link_info->duplex = resp->duplex_state;
12296 link_info->pause = resp->pause;
12297 link_info->auto_mode = resp->auto_mode;
12298 link_info->auto_pause_setting = resp->auto_pause;
12299 link_info->lp_pause = resp->link_partner_adv_pause;
12300 link_info->force_pause_setting = resp->force_pause;
12301 link_info->duplex_setting = resp->duplex_cfg;
12302 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12303 link_info->link_speed = le16_to_cpu(resp->link_speed);
12304 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12305 link_info->active_lanes = resp->active_lanes;
12306 } else {
12307 link_info->link_speed = 0;
12308 link_info->active_lanes = 0;
12309 }
12310 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12311 link_info->force_pam4_link_speed =
12312 le16_to_cpu(resp->force_pam4_link_speed);
12313 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12314 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12315 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12316 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12317 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12318 link_info->auto_pam4_link_speeds =
12319 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12320 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12321 link_info->lp_auto_link_speeds =
12322 le16_to_cpu(resp->link_partner_adv_speeds);
12323 link_info->lp_auto_pam4_link_speeds =
12324 resp->link_partner_pam4_adv_speeds;
12325 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12326 link_info->phy_ver[0] = resp->phy_maj;
12327 link_info->phy_ver[1] = resp->phy_min;
12328 link_info->phy_ver[2] = resp->phy_bld;
12329 link_info->media_type = resp->media_type;
12330 link_info->phy_type = resp->phy_type;
12331 link_info->transceiver = resp->xcvr_pkg_type;
12332 link_info->phy_addr = resp->eee_config_phy_addr &
12333 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12334 link_info->module_status = resp->module_status;
12335 link_info->link_down_reason = resp->link_down_reason;
12336
12337 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12338 struct ethtool_keee *eee = &bp->eee;
12339 u16 fw_speeds;
12340
12341 eee->eee_active = 0;
12342 if (resp->eee_config_phy_addr &
12343 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12344 eee->eee_active = 1;
12345 fw_speeds = le16_to_cpu(
12346 resp->link_partner_adv_eee_link_speed_mask);
12347 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12348 }
12349
12350 /* Pull initial EEE config */
12351 if (!chng_link_state) {
12352 if (resp->eee_config_phy_addr &
12353 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12354 eee->eee_enabled = 1;
12355
12356 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12357 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12358
12359 if (resp->eee_config_phy_addr &
12360 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12361 __le32 tmr;
12362
12363 eee->tx_lpi_enabled = 1;
12364 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12365 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12366 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12367 }
12368 }
12369 }
12370
12371 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12372 if (bp->hwrm_spec_code >= 0x10504) {
12373 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12374 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12375 }
12376 /* TODO: need to add more logic to report VF link */
12377 if (chng_link_state) {
12378 if (link_info->phy_link_status == BNXT_LINK_LINK)
12379 link_info->link_state = BNXT_LINK_STATE_UP;
12380 else
12381 link_info->link_state = BNXT_LINK_STATE_DOWN;
12382 if (link_state != link_info->link_state)
12383 bnxt_report_link(bp);
12384 } else {
12385 /* always report link down if not required to update link state */
12386 link_info->link_state = BNXT_LINK_STATE_DOWN;
12387 }
12388 hwrm_req_drop(bp, req);
12389
12390 if (!BNXT_PHY_CFG_ABLE(bp))
12391 return 0;
12392
12393 support_changed = bnxt_support_speed_dropped(link_info);
12394 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12395 bnxt_hwrm_set_link_setting(bp, true, false);
12396 return 0;
12397 }
12398
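/* Refresh the link state and warn if the attached SFP+ module is
 * unqualified, has TX disabled, or has been shut down.
 */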
12399 static void bnxt_get_port_module_status(struct bnxt *bp)
12400 {
12401 struct bnxt_link_info *link_info = &bp->link_info;
12402 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12403 u8 module_status;
12404
12405 if (bnxt_update_link(bp, true))
12406 return;
12407
12408 module_status = link_info->module_status;
12409 switch (module_status) {
12410 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12411 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12412 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12413 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12414 bp->pf.port_id);
12415 if (bp->hwrm_spec_code >= 0x10201) {
12416 netdev_warn(bp->dev, "Module part number %s\n",
12417 resp->phy_vendor_partnumber);
12418 }
12419 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12420 netdev_warn(bp->dev, "TX is disabled\n");
12421 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12422 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12423 }
12424 }
12425
12426 static void
12427 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12428 {
12429 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12430 if (bp->hwrm_spec_code >= 0x10201)
12431 req->auto_pause =
12432 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12433 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12434 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12435 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12436 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12437 req->enables |=
12438 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12439 } else {
12440 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12441 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12442 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12443 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12444 req->enables |=
12445 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12446 if (bp->hwrm_spec_code >= 0x10201) {
12447 req->auto_pause = req->force_pause;
12448 req->enables |= cpu_to_le32(
12449 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12450 }
12451 }
12452 }
12453
12454 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12455 {
12456 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12457 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12458 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12459 req->enables |=
12460 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12461 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12462 } else if (bp->link_info.advertising) {
12463 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12464 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12465 }
12466 if (bp->link_info.advertising_pam4) {
12467 req->enables |=
12468 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12469 req->auto_link_pam4_speed_mask =
12470 cpu_to_le16(bp->link_info.advertising_pam4);
12471 }
12472 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12473 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12474 } else {
12475 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12476 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12477 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12478 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12479 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12480 (u32)bp->link_info.req_link_speed);
12481 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12482 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12483 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12484 } else {
12485 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12486 }
12487 }
12488
12489 /* tell chimp that the setting takes effect immediately */
12490 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12491 }
12492
12493 int bnxt_hwrm_set_pause(struct bnxt *bp)
12494 {
12495 struct hwrm_port_phy_cfg_input *req;
12496 int rc;
12497
12498 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12499 if (rc)
12500 return rc;
12501
12502 bnxt_hwrm_set_pause_common(bp, req);
12503
12504 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12505 bp->link_info.force_link_chng)
12506 bnxt_hwrm_set_link_common(bp, req);
12507
12508 rc = hwrm_req_send(bp, req);
12509 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12510 /* Since changing the pause setting doesn't trigger any link
12511 * change event, the driver needs to update the current pause
12512 * result upon successful return of the phy_cfg command
12513 */
12514 bp->link_info.pause =
12515 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12516 bp->link_info.auto_pause_setting = 0;
12517 if (!bp->link_info.force_link_chng)
12518 bnxt_report_link(bp);
12519 }
12520 bp->link_info.force_link_chng = false;
12521 return rc;
12522 }
12523
12524 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12525 struct hwrm_port_phy_cfg_input *req)
12526 {
12527 struct ethtool_keee *eee = &bp->eee;
12528
12529 if (eee->eee_enabled) {
12530 u16 eee_speeds;
12531 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12532
12533 if (eee->tx_lpi_enabled)
12534 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12535 else
12536 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12537
12538 req->flags |= cpu_to_le32(flags);
12539 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12540 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12541 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12542 } else {
12543 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12544 }
12545 }
12546
12547 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12548 {
12549 struct hwrm_port_phy_cfg_input *req;
12550 int rc;
12551
12552 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12553 if (rc)
12554 return rc;
12555
12556 if (set_pause)
12557 bnxt_hwrm_set_pause_common(bp, req);
12558
12559 bnxt_hwrm_set_link_common(bp, req);
12560
12561 if (set_eee)
12562 bnxt_hwrm_set_eee(bp, req);
12563 return hwrm_req_send(bp, req);
12564 }
12565
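/* Force the PHY link down when the device is closed. Only done when this
 * PF solely owns the port, and skipped when VFs are active unless firmware
 * manages the forced link-down on the driver's behalf.
 */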
12566 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12567 {
12568 struct hwrm_port_phy_cfg_input *req;
12569 int rc;
12570
12571 if (!BNXT_SINGLE_PF(bp))
12572 return 0;
12573
12574 if (pci_num_vf(bp->pdev) &&
12575 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12576 return 0;
12577
12578 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12579 if (rc)
12580 return rc;
12581
12582 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12583 rc = hwrm_req_send(bp, req);
12584 if (!rc) {
12585 mutex_lock(&bp->link_lock);
12586 /* The device is not obliged to bring the link down in certain scenarios, even
12587 * when forced. Setting the state to unknown is consistent with
12588 * driver startup and will force link state to be reported
12589 * during subsequent open based on PORT_PHY_QCFG.
12590 */
12591 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12592 mutex_unlock(&bp->link_lock);
12593 }
12594 return rc;
12595 }
12596
12597 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12598 {
12599 #ifdef CONFIG_TEE_BNXT_FW
12600 int rc = tee_bnxt_fw_load();
12601
12602 if (rc)
12603 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12604
12605 return rc;
12606 #else
12607 netdev_err(bp->dev, "OP-TEE not supported\n");
12608 return -ENODEV;
12609 #endif
12610 }
12611
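/* Wait for firmware to finish booting or recovering, then check its health.
 * If firmware crashed with no master function left to recover it, request a
 * firmware reset through OP-TEE.
 */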
12612 static int bnxt_try_recover_fw(struct bnxt *bp)
12613 {
12614 if (bp->fw_health && bp->fw_health->status_reliable) {
12615 int retry = 0, rc;
12616 u32 sts;
12617
12618 do {
12619 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12620 rc = bnxt_hwrm_poll(bp);
12621 if (!BNXT_FW_IS_BOOTING(sts) &&
12622 !BNXT_FW_IS_RECOVERING(sts))
12623 break;
12624 retry++;
12625 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12626
12627 if (!BNXT_FW_IS_HEALTHY(sts)) {
12628 netdev_err(bp->dev,
12629 "Firmware not responding, status: 0x%x\n",
12630 sts);
12631 rc = -ENODEV;
12632 }
12633 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12634 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12635 return bnxt_fw_reset_via_optee(bp);
12636 }
12637 return rc;
12638 }
12639
12640 return -ENODEV;
12641 }
12642
12643 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12644 {
12645 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12646
12647 if (!BNXT_NEW_RM(bp))
12648 return; /* no resource reservations required */
12649
12650 hw_resc->resv_cp_rings = 0;
12651 hw_resc->resv_stat_ctxs = 0;
12652 hw_resc->resv_irqs = 0;
12653 hw_resc->resv_tx_rings = 0;
12654 hw_resc->resv_rx_rings = 0;
12655 hw_resc->resv_hw_ring_grps = 0;
12656 hw_resc->resv_vnics = 0;
12657 hw_resc->resv_rsscos_ctxs = 0;
12658 if (!fw_reset) {
12659 bp->tx_nr_rings = 0;
12660 bp->rx_nr_rings = 0;
12661 }
12662 }
12663
12664 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12665 {
12666 int rc;
12667
12668 if (!BNXT_NEW_RM(bp))
12669 return 0; /* no resource reservations required */
12670
12671 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12672 if (rc)
12673 netdev_err(bp->dev, "resc_qcaps failed\n");
12674
12675 bnxt_clear_reservations(bp, fw_reset);
12676
12677 return rc;
12678 }
12679
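/* Notify firmware that the interface is going up or down. On the up
 * transition, detect firmware resets and resource or capability changes that
 * occurred while the interface was down and reinitialize accordingly.
 */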
12680 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12681 {
12682 struct hwrm_func_drv_if_change_output *resp;
12683 struct hwrm_func_drv_if_change_input *req;
12684 bool resc_reinit = false;
12685 bool caps_change = false;
12686 int rc, retry = 0;
12687 bool fw_reset;
12688 u32 flags = 0;
12689
12690 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12691 bp->fw_reset_state = 0;
12692
12693 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12694 return 0;
12695
12696 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12697 if (rc)
12698 return rc;
12699
12700 if (up)
12701 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12702 resp = hwrm_req_hold(bp, req);
12703
12704 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12705 while (retry < BNXT_FW_IF_RETRY) {
12706 rc = hwrm_req_send(bp, req);
12707 if (rc != -EAGAIN)
12708 break;
12709
12710 msleep(50);
12711 retry++;
12712 }
12713
12714 if (rc == -EAGAIN) {
12715 hwrm_req_drop(bp, req);
12716 return rc;
12717 } else if (!rc) {
12718 flags = le32_to_cpu(resp->flags);
12719 } else if (up) {
12720 rc = bnxt_try_recover_fw(bp);
12721 fw_reset = true;
12722 }
12723 hwrm_req_drop(bp, req);
12724 if (rc)
12725 return rc;
12726
12727 if (!up) {
12728 bnxt_inv_fw_health_reg(bp);
12729 return 0;
12730 }
12731
12732 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12733 resc_reinit = true;
12734 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12735 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12736 fw_reset = true;
12737 else
12738 bnxt_remap_fw_health_regs(bp);
12739
12740 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12741 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12742 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12743 return -ENODEV;
12744 }
12745 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12746 caps_change = true;
12747
12748 if (resc_reinit || fw_reset || caps_change) {
12749 if (fw_reset || caps_change) {
12750 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12751 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12752 bnxt_ulp_irq_stop(bp);
12753 bnxt_free_ctx_mem(bp, false);
12754 bnxt_dcb_free(bp);
12755 rc = bnxt_fw_init_one(bp);
12756 if (rc) {
12757 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12758 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12759 return rc;
12760 }
12761 /* IRQ will be initialized later in bnxt_request_irq() */
12762 bnxt_clear_int_mode(bp);
12763 }
12764 rc = bnxt_cancel_reservations(bp, fw_reset);
12765 }
12766 return rc;
12767 }
12768
12769 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12770 {
12771 struct hwrm_port_led_qcaps_output *resp;
12772 struct hwrm_port_led_qcaps_input *req;
12773 struct bnxt_pf_info *pf = &bp->pf;
12774 int rc;
12775
12776 bp->num_leds = 0;
12777 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12778 return 0;
12779
12780 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12781 if (rc)
12782 return rc;
12783
12784 req->port_id = cpu_to_le16(pf->port_id);
12785 resp = hwrm_req_hold(bp, req);
12786 rc = hwrm_req_send(bp, req);
12787 if (rc) {
12788 hwrm_req_drop(bp, req);
12789 return rc;
12790 }
12791 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12792 int i;
12793
12794 bp->num_leds = resp->num_leds;
12795 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12796 bp->num_leds);
12797 for (i = 0; i < bp->num_leds; i++) {
12798 struct bnxt_led_info *led = &bp->leds[i];
12799 __le16 caps = led->led_state_caps;
12800
12801 if (!led->led_group_id ||
12802 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12803 bp->num_leds = 0;
12804 break;
12805 }
12806 }
12807 }
12808 hwrm_req_drop(bp, req);
12809 return 0;
12810 }
12811
12812 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12813 {
12814 struct hwrm_wol_filter_alloc_output *resp;
12815 struct hwrm_wol_filter_alloc_input *req;
12816 int rc;
12817
12818 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12819 if (rc)
12820 return rc;
12821
12822 req->port_id = cpu_to_le16(bp->pf.port_id);
12823 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12824 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12825 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12826
12827 resp = hwrm_req_hold(bp, req);
12828 rc = hwrm_req_send(bp, req);
12829 if (!rc)
12830 bp->wol_filter_id = resp->wol_filter_id;
12831 hwrm_req_drop(bp, req);
12832 return rc;
12833 }
12834
12835 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12836 {
12837 struct hwrm_wol_filter_free_input *req;
12838 int rc;
12839
12840 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12841 if (rc)
12842 return rc;
12843
12844 req->port_id = cpu_to_le16(bp->pf.port_id);
12845 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12846 req->wol_filter_id = bp->wol_filter_id;
12847
12848 return hwrm_req_send(bp, req);
12849 }
12850
12851 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12852 {
12853 struct hwrm_wol_filter_qcfg_output *resp;
12854 struct hwrm_wol_filter_qcfg_input *req;
12855 u16 next_handle = 0;
12856 int rc;
12857
12858 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12859 if (rc)
12860 return rc;
12861
12862 req->port_id = cpu_to_le16(bp->pf.port_id);
12863 req->handle = cpu_to_le16(handle);
12864 resp = hwrm_req_hold(bp, req);
12865 rc = hwrm_req_send(bp, req);
12866 if (!rc) {
12867 next_handle = le16_to_cpu(resp->next_handle);
12868 if (next_handle != 0) {
12869 if (resp->wol_type ==
12870 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12871 bp->wol = 1;
12872 bp->wol_filter_id = resp->wol_filter_id;
12873 }
12874 }
12875 }
12876 hwrm_req_drop(bp, req);
12877 return next_handle;
12878 }
12879
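/* Walk the firmware WoL filter list and record the magic-packet filter,
 * if one is configured.
 */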
12880 static void bnxt_get_wol_settings(struct bnxt *bp)
12881 {
12882 u16 handle = 0;
12883
12884 bp->wol = 0;
12885 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12886 return;
12887
12888 do {
12889 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12890 } while (handle && handle != 0xffff);
12891 }
12892
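/* Validate the EEE configuration against the current autoneg settings.
 * Returns false (after correcting the EEE state) if EEE had to be disabled
 * or its advertisement trimmed.
 */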
12893 static bool bnxt_eee_config_ok(struct bnxt *bp)
12894 {
12895 struct ethtool_keee *eee = &bp->eee;
12896 struct bnxt_link_info *link_info = &bp->link_info;
12897
12898 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12899 return true;
12900
12901 if (eee->eee_enabled) {
12902 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12903 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12904
12905 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12906
12907 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12908 eee->eee_enabled = 0;
12909 return false;
12910 }
12911 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12912 linkmode_and(eee->advertised, advertising,
12913 eee->supported);
12914 return false;
12915 }
12916 }
12917 return true;
12918 }
12919
12920 static int bnxt_update_phy_setting(struct bnxt *bp)
12921 {
12922 int rc;
12923 bool update_link = false;
12924 bool update_pause = false;
12925 bool update_eee = false;
12926 struct bnxt_link_info *link_info = &bp->link_info;
12927
12928 rc = bnxt_update_link(bp, true);
12929 if (rc) {
12930 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12931 rc);
12932 return rc;
12933 }
12934 if (!BNXT_SINGLE_PF(bp))
12935 return 0;
12936
12937 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12938 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12939 link_info->req_flow_ctrl)
12940 update_pause = true;
12941 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12942 link_info->force_pause_setting != link_info->req_flow_ctrl)
12943 update_pause = true;
12944 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12945 if (BNXT_AUTO_MODE(link_info->auto_mode))
12946 update_link = true;
12947 if (bnxt_force_speed_updated(link_info))
12948 update_link = true;
12949 if (link_info->req_duplex != link_info->duplex_setting)
12950 update_link = true;
12951 } else {
12952 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12953 update_link = true;
12954 if (bnxt_auto_speed_updated(link_info))
12955 update_link = true;
12956 }
12957
12958 /* The last close may have shut down the link, so we need to call
12959 * PHY_CFG to bring it back up.
12960 */
12961 if (!BNXT_LINK_IS_UP(bp))
12962 update_link = true;
12963
12964 if (!bnxt_eee_config_ok(bp))
12965 update_eee = true;
12966
12967 if (update_link)
12968 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12969 else if (update_pause)
12970 rc = bnxt_hwrm_set_pause(bp);
12971 if (rc) {
12972 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12973 rc);
12974 return rc;
12975 }
12976
12977 return rc;
12978 }
12979
12980 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12981
12982 static int bnxt_reinit_after_abort(struct bnxt *bp)
12983 {
12984 int rc;
12985
12986 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12987 return -EBUSY;
12988
12989 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12990 return -ENODEV;
12991
12992 rc = bnxt_fw_init_one(bp);
12993 if (!rc) {
12994 bnxt_clear_int_mode(bp);
12995 rc = bnxt_init_int_mode(bp);
12996 if (!rc) {
12997 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12998 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12999 }
13000 }
13001 return rc;
13002 }
13003
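/* Re-install one user-configured ntuple or L2 filter after a reset or
 * reopen; drop the filter and log an error if firmware rejects it.
 */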
13004 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
13005 {
13006 struct bnxt_ntuple_filter *ntp_fltr;
13007 struct bnxt_l2_filter *l2_fltr;
13008
13009 if (list_empty(&fltr->list))
13010 return;
13011
13012 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
13013 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
13014 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
13015 atomic_inc(&l2_fltr->refcnt);
13016 ntp_fltr->l2_fltr = l2_fltr;
13017 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
13018 bnxt_del_ntp_filter(bp, ntp_fltr);
13019 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
13020 fltr->sw_id);
13021 }
13022 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
13023 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
13024 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
13025 bnxt_del_l2_filter(bp, l2_fltr);
13026 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
13027 fltr->sw_id);
13028 }
13029 }
13030 }
13031
13032 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
13033 {
13034 struct bnxt_filter_base *usr_fltr, *tmp;
13035
13036 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
13037 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
13038 }
13039
13040 static int bnxt_set_xps_mapping(struct bnxt *bp)
13041 {
13042 int numa_node = dev_to_node(&bp->pdev->dev);
13043 unsigned int q_idx, map_idx, cpu, i;
13044 const struct cpumask *cpu_mask_ptr;
13045 int nr_cpus = num_online_cpus();
13046 cpumask_t *q_map;
13047 int rc = 0;
13048
13049 q_map = kzalloc_objs(*q_map, bp->tx_nr_rings_per_tc);
13050 if (!q_map)
13051 return -ENOMEM;
13052
13053 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
13054 * Each TC has the same number of TX queues. The nth TX queue for each
13055 * TC will have the same CPU mask.
13056 */
13057 for (i = 0; i < nr_cpus; i++) {
13058 map_idx = i % bp->tx_nr_rings_per_tc;
13059 cpu = cpumask_local_spread(i, numa_node);
13060 cpu_mask_ptr = get_cpu_mask(cpu);
13061 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
13062 }
13063
13064 /* Register CPU mask for each TX queue except the ones marked for XDP */
13065 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
13066 map_idx = q_idx % bp->tx_nr_rings_per_tc;
13067 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
13068 if (rc) {
13069 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
13070 q_idx);
13071 break;
13072 }
13073 }
13074
13075 kfree(q_map);
13076
13077 return rc;
13078 }
13079
13080 static int bnxt_tx_nr_rings(struct bnxt *bp)
13081 {
13082 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
13083 bp->tx_nr_rings_per_tc;
13084 }
13085
13086 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
13087 {
13088 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
13089 }
13090
13091 static void bnxt_set_xdp_tx_rings(struct bnxt *bp)
13092 {
13093 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13094 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13095 }
13096
13097 static void bnxt_adj_tx_rings(struct bnxt *bp)
13098 {
13099 /* Make adjustments if the reserved TX rings are fewer than requested */
13100 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13101 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13102 if (bp->tx_nr_rings_xdp)
13103 bnxt_set_xdp_tx_rings(bp);
13104 }
13105
13106 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13107 {
13108 int rc = 0;
13109
13110 netif_carrier_off(bp->dev);
13111 if (irq_re_init) {
13112 /* Reserve rings now if none were reserved at driver probe. */
13113 rc = bnxt_init_dflt_ring_mode(bp);
13114 if (rc) {
13115 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
13116 return rc;
13117 }
13118 }
13119 rc = bnxt_reserve_rings(bp, irq_re_init);
13120 if (rc)
13121 return rc;
13122
13123 bnxt_adj_tx_rings(bp);
13124 rc = bnxt_alloc_mem(bp, irq_re_init);
13125 if (rc) {
13126 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13127 goto open_err_free_mem;
13128 }
13129
13130 if (irq_re_init) {
13131 bnxt_init_napi(bp);
13132 rc = bnxt_request_irq(bp);
13133 if (rc) {
13134 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13135 goto open_err_irq;
13136 }
13137 }
13138
13139 rc = bnxt_init_nic(bp, irq_re_init);
13140 if (rc) {
13141 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13142 goto open_err_irq;
13143 }
13144
13145 bnxt_enable_napi(bp);
13146 bnxt_debug_dev_init(bp);
13147
13148 if (link_re_init) {
13149 mutex_lock(&bp->link_lock);
13150 rc = bnxt_update_phy_setting(bp);
13151 mutex_unlock(&bp->link_lock);
13152 if (rc) {
13153 netdev_warn(bp->dev, "failed to update phy settings\n");
13154 if (BNXT_SINGLE_PF(bp)) {
13155 bp->link_info.phy_retry = true;
13156 bp->link_info.phy_retry_expires =
13157 jiffies + 5 * HZ;
13158 }
13159 }
13160 }
13161
13162 if (irq_re_init) {
13163 udp_tunnel_nic_reset_ntf(bp->dev);
13164 rc = bnxt_set_xps_mapping(bp);
13165 if (rc)
13166 netdev_warn(bp->dev, "failed to set xps mapping\n");
13167 }
13168
13169 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13170 if (!static_key_enabled(&bnxt_xdp_locking_key))
13171 static_branch_enable(&bnxt_xdp_locking_key);
13172 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13173 static_branch_disable(&bnxt_xdp_locking_key);
13174 }
13175 set_bit(BNXT_STATE_OPEN, &bp->state);
13176 bnxt_enable_int(bp);
13177 /* Enable TX queues */
13178 bnxt_tx_enable(bp);
13179 mod_timer(&bp->timer, jiffies + bp->current_interval);
13180 /* Poll link status and check for SFP+ module status */
13181 mutex_lock(&bp->link_lock);
13182 bnxt_get_port_module_status(bp);
13183 mutex_unlock(&bp->link_lock);
13184
13185 /* VF-reps may need to be re-opened after the PF is re-opened */
13186 if (BNXT_PF(bp))
13187 bnxt_vf_reps_open(bp);
13188 bnxt_ptp_init_rtc(bp, true);
13189 bnxt_ptp_cfg_tstamp_filters(bp);
13190 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13191 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13192 bnxt_cfg_usr_fltrs(bp);
13193 return 0;
13194
13195 open_err_irq:
13196 bnxt_del_napi(bp);
13197
13198 open_err_free_mem:
13199 bnxt_free_skbs(bp);
13200 bnxt_free_irq(bp);
13201 bnxt_free_mem(bp, true);
13202 return rc;
13203 }
13204
13205 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13206 {
13207 int rc = 0;
13208
13209 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13210 rc = -EIO;
13211 if (!rc)
13212 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13213 if (rc) {
13214 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13215 netif_close(bp->dev);
13216 }
13217 return rc;
13218 }
13219
13220 /* netdev instance lock held, open the NIC halfway by allocating all
13221 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13222 * for offline self tests.
13223 */
13224 int bnxt_half_open_nic(struct bnxt *bp)
13225 {
13226 int rc = 0;
13227
13228 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13229 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13230 rc = -ENODEV;
13231 goto half_open_err;
13232 }
13233
13234 rc = bnxt_alloc_mem(bp, true);
13235 if (rc) {
13236 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13237 goto half_open_err;
13238 }
13239 bnxt_init_napi(bp);
13240 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13241 rc = bnxt_init_nic(bp, true);
13242 if (rc) {
13243 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13244 bnxt_del_napi(bp);
13245 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13246 goto half_open_err;
13247 }
13248 return 0;
13249
13250 half_open_err:
13251 bnxt_free_skbs(bp);
13252 bnxt_free_mem(bp, true);
13253 netif_close(bp->dev);
13254 return rc;
13255 }
13256
13257 /* netdev instance lock held, this call can only be made after a previous
13258 * successful call to bnxt_half_open_nic().
13259 */
13260 void bnxt_half_close_nic(struct bnxt *bp)
13261 {
13262 bnxt_hwrm_resource_free(bp, false, true);
13263 bnxt_del_napi(bp);
13264 bnxt_free_skbs(bp);
13265 bnxt_free_mem(bp, true);
13266 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13267 }
13268
13269 void bnxt_reenable_sriov(struct bnxt *bp)
13270 {
13271 if (BNXT_PF(bp)) {
13272 struct bnxt_pf_info *pf = &bp->pf;
13273 int n = pf->active_vfs;
13274
13275 if (n)
13276 bnxt_cfg_hw_sriov(bp, &n, true);
13277 }
13278 }
13279
13280 static int bnxt_open(struct net_device *dev)
13281 {
13282 struct bnxt *bp = netdev_priv(dev);
13283 int rc;
13284
13285 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13286 rc = bnxt_reinit_after_abort(bp);
13287 if (rc) {
13288 if (rc == -EBUSY)
13289 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13290 else
13291 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13292 return -ENODEV;
13293 }
13294 }
13295
13296 rc = bnxt_hwrm_if_change(bp, true);
13297 if (rc)
13298 return rc;
13299
13300 rc = __bnxt_open_nic(bp, true, true);
13301 if (rc) {
13302 bnxt_hwrm_if_change(bp, false);
13303 } else {
13304 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13305 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13306 bnxt_queue_sp_work(bp,
13307 BNXT_RESTART_ULP_SP_EVENT);
13308 }
13309 }
13310
13311 return rc;
13312 }
13313
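/* Return true while the slow-path task or a stats read is still in progress. */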
13314 static bool bnxt_drv_busy(struct bnxt *bp)
13315 {
13316 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13317 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13318 }
13319
13320 static void bnxt_get_ring_stats(struct bnxt *bp,
13321 struct rtnl_link_stats64 *stats);
13322
13323 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13324 bool link_re_init)
13325 {
13326 /* Close the VF-reps before closing PF */
13327 if (BNXT_PF(bp))
13328 bnxt_vf_reps_close(bp);
13329
13330 /* Change device state to avoid TX queue wake-ups */
13331 bnxt_tx_disable(bp);
13332
13333 clear_bit(BNXT_STATE_OPEN, &bp->state);
13334 smp_mb__after_atomic();
13335 while (bnxt_drv_busy(bp))
13336 msleep(20);
13337
13338 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13339 bnxt_clear_rss_ctxs(bp);
13340 /* Flush rings and disable interrupts */
13341 bnxt_shutdown_nic(bp, irq_re_init);
13342
13343 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13344
13345 bnxt_debug_dev_exit(bp);
13346 bnxt_disable_napi(bp);
13347 timer_delete_sync(&bp->timer);
13348 bnxt_free_skbs(bp);
13349
13350 /* Save ring stats before shutdown */
13351 if (bp->bnapi && irq_re_init) {
13352 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13353 bnxt_get_ring_drv_stats(bp, &bp->ring_drv_stats_prev);
13354 }
13355 if (irq_re_init) {
13356 bnxt_free_irq(bp);
13357 bnxt_del_napi(bp);
13358 }
13359 bnxt_free_mem(bp, irq_re_init);
13360 }
13361
13362 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13363 {
13364 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13365 /* If we get here, it means firmware reset is in progress
13366 * while we are trying to close. We can safely proceed with
13367 * the close because we are holding the netdev instance lock.
13368 * Some firmware messages may fail as we proceed to close.
13369 * We set the ABORT_ERR flag here so that the FW reset thread
13370 * will later abort when it gets the netdev instance lock
13371 * and sees the flag.
13372 */
13373 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13374 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13375 }
13376
13377 #ifdef CONFIG_BNXT_SRIOV
13378 if (bp->sriov_cfg) {
13379 int rc;
13380
13381 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13382 !bp->sriov_cfg,
13383 BNXT_SRIOV_CFG_WAIT_TMO);
13384 if (!rc)
13385 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13386 else if (rc < 0)
13387 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13388 }
13389 #endif
13390 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13391 }
13392
13393 static int bnxt_close(struct net_device *dev)
13394 {
13395 struct bnxt *bp = netdev_priv(dev);
13396
13397 bnxt_close_nic(bp, true, true);
13398 bnxt_hwrm_shutdown_link(bp);
13399 bnxt_hwrm_if_change(bp, false);
13400 return 0;
13401 }
13402
13403 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13404 u16 *val)
13405 {
13406 struct hwrm_port_phy_mdio_read_output *resp;
13407 struct hwrm_port_phy_mdio_read_input *req;
13408 int rc;
13409
13410 if (bp->hwrm_spec_code < 0x10a00)
13411 return -EOPNOTSUPP;
13412
13413 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13414 if (rc)
13415 return rc;
13416
13417 req->port_id = cpu_to_le16(bp->pf.port_id);
13418 req->phy_addr = phy_addr;
13419 req->reg_addr = cpu_to_le16(reg & 0x1f);
13420 if (mdio_phy_id_is_c45(phy_addr)) {
13421 req->cl45_mdio = 1;
13422 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13423 req->dev_addr = mdio_phy_id_devad(phy_addr);
13424 req->reg_addr = cpu_to_le16(reg);
13425 }
13426
13427 resp = hwrm_req_hold(bp, req);
13428 rc = hwrm_req_send(bp, req);
13429 if (!rc)
13430 *val = le16_to_cpu(resp->reg_data);
13431 hwrm_req_drop(bp, req);
13432 return rc;
13433 }
13434
13435 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13436 u16 val)
13437 {
13438 struct hwrm_port_phy_mdio_write_input *req;
13439 int rc;
13440
13441 if (bp->hwrm_spec_code < 0x10a00)
13442 return -EOPNOTSUPP;
13443
13444 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13445 if (rc)
13446 return rc;
13447
13448 req->port_id = cpu_to_le16(bp->pf.port_id);
13449 req->phy_addr = phy_addr;
13450 req->reg_addr = cpu_to_le16(reg & 0x1f);
13451 if (mdio_phy_id_is_c45(phy_addr)) {
13452 req->cl45_mdio = 1;
13453 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13454 req->dev_addr = mdio_phy_id_devad(phy_addr);
13455 req->reg_addr = cpu_to_le16(reg);
13456 }
13457 req->reg_data = cpu_to_le16(val);
13458
13459 return hwrm_req_send(bp, req);
13460 }
13461
13462 /* netdev instance lock held */
13463 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13464 {
13465 struct mii_ioctl_data *mdio = if_mii(ifr);
13466 struct bnxt *bp = netdev_priv(dev);
13467 int rc;
13468
13469 switch (cmd) {
13470 case SIOCGMIIPHY:
13471 mdio->phy_id = bp->link_info.phy_addr;
13472
13473 fallthrough;
13474 case SIOCGMIIREG: {
13475 u16 mii_regval = 0;
13476
13477 if (!netif_running(dev))
13478 return -EAGAIN;
13479
13480 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13481 &mii_regval);
13482 mdio->val_out = mii_regval;
13483 return rc;
13484 }
13485
13486 case SIOCSMIIREG:
13487 if (!netif_running(dev))
13488 return -EAGAIN;
13489
13490 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13491 mdio->val_in);
13492
13493 default:
13494 /* do nothing */
13495 break;
13496 }
13497 return -EOPNOTSUPP;
13498 }
13499
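/* Aggregate the per-ring hardware counters into rtnl_link_stats64. */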
13500 static void bnxt_get_ring_stats(struct bnxt *bp,
13501 struct rtnl_link_stats64 *stats)
13502 {
13503 int i;
13504
13505 for (i = 0; i < bp->cp_nr_rings; i++) {
13506 struct bnxt_napi *bnapi = bp->bnapi[i];
13507 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13508 u64 *sw = cpr->stats.sw_stats;
13509
13510 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13511 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13512 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13513
13514 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13515 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13516 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13517
13518 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13519 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13520 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13521
13522 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13523 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13524 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13525
13526 stats->rx_missed_errors +=
13527 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13528
13529 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13530
13531 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13532
13533 stats->rx_dropped +=
13534 cpr->sw_stats->rx.rx_netpoll_discards +
13535 cpr->sw_stats->rx.rx_oom_discards;
13536 }
13537 }
13538
13539 static void bnxt_add_prev_stats(struct bnxt *bp,
13540 struct rtnl_link_stats64 *stats)
13541 {
13542 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13543
13544 stats->rx_packets += prev_stats->rx_packets;
13545 stats->tx_packets += prev_stats->tx_packets;
13546 stats->rx_bytes += prev_stats->rx_bytes;
13547 stats->tx_bytes += prev_stats->tx_bytes;
13548 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13549 stats->multicast += prev_stats->multicast;
13550 stats->rx_dropped += prev_stats->rx_dropped;
13551 stats->tx_dropped += prev_stats->tx_dropped;
13552 }
13553
13554 static void
13555 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13556 {
13557 struct bnxt *bp = netdev_priv(dev);
13558
13559 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13560 /* Make sure bnxt_close_nic() sees that we are reading stats before
13561 * we check the BNXT_STATE_OPEN flag.
13562 */
13563 smp_mb__after_atomic();
13564 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13565 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13566 *stats = bp->net_stats_prev;
13567 return;
13568 }
13569
13570 bnxt_get_ring_stats(bp, stats);
13571 bnxt_add_prev_stats(bp, stats);
13572
13573 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13574 u64 *rx = bp->port_stats.sw_stats;
13575 u64 *tx = bp->port_stats.sw_stats +
13576 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13577
13578 stats->rx_crc_errors =
13579 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13580 stats->rx_frame_errors =
13581 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13582 stats->rx_length_errors =
13583 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13584 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13585 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13586 stats->rx_errors =
13587 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13588 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13589 stats->collisions =
13590 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13591 stats->tx_fifo_errors =
13592 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13593 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13594 }
13595 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13596 }
13597
13598 static void bnxt_get_one_ring_drv_stats(struct bnxt *bp,
13599 struct bnxt_total_ring_drv_stats *stats,
13600 struct bnxt_cp_ring_info *cpr)
13601 {
13602 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13603 u64 *hw_stats = cpr->stats.sw_stats;
13604
13605 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13606 stats->rx_total_resets += sw_stats->rx.rx_resets;
13607 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13608 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13609 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13610 stats->rx_total_ring_discards +=
13611 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13612 stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13613 stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13614 stats->tx_total_resets += sw_stats->tx.tx_resets;
13615 stats->tx_total_ring_discards +=
13616 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13617 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13618 }
13619
13620 void bnxt_get_ring_drv_stats(struct bnxt *bp,
13621 struct bnxt_total_ring_drv_stats *stats)
13622 {
13623 int i;
13624
13625 for (i = 0; i < bp->cp_nr_rings; i++)
13626 bnxt_get_one_ring_drv_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13627 }
13628
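/* Copy the multicast list into the default VNIC and return true if it
 * changed. Falls back to ALL_MCAST when the list exceeds BNXT_MAX_MC_ADDRS.
 */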
13629 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask,
13630 const struct netdev_hw_addr_list *mc)
13631 {
13632 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13633 struct netdev_hw_addr *ha;
13634 u8 *haddr;
13635 int mc_count = 0;
13636 bool update = false;
13637 int off = 0;
13638
13639 netdev_hw_addr_list_for_each(ha, mc) {
13640 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13641 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13642 vnic->mc_list_count = 0;
13643 return false;
13644 }
13645 haddr = ha->addr;
13646 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13647 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13648 update = true;
13649 }
13650 off += ETH_ALEN;
13651 mc_count++;
13652 }
13653 if (mc_count)
13654 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13655
13656 if (mc_count != vnic->mc_list_count) {
13657 vnic->mc_list_count = mc_count;
13658 update = true;
13659 }
13660 return update;
13661 }
13662
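/* Return true if the unicast address list differs from what is currently
 * programmed in the default VNIC.
 */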
13663 static bool bnxt_uc_list_updated(struct bnxt *bp,
13664 const struct netdev_hw_addr_list *uc)
13665 {
13666 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13667 struct netdev_hw_addr *ha;
13668 int off = 0;
13669
13670 if (netdev_hw_addr_list_count(uc) != (vnic->uc_filter_count - 1))
13671 return true;
13672
13673 netdev_hw_addr_list_for_each(ha, uc) {
13674 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13675 return true;
13676
13677 off += ETH_ALEN;
13678 }
13679 return false;
13680 }
13681
13682 static void bnxt_set_rx_mode(struct net_device *dev,
13683 struct netdev_hw_addr_list *uc,
13684 struct netdev_hw_addr_list *mc)
13685 {
13686 struct bnxt *bp = netdev_priv(dev);
13687 struct bnxt_vnic_info *vnic;
13688 bool mc_update = false;
13689 bool uc_update;
13690 u32 mask;
13691
13692 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13693 return;
13694
13695 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13696 mask = vnic->rx_mask;
13697 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13698 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13699 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13700 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13701
13702 if (dev->flags & IFF_PROMISC)
13703 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13704
13705 uc_update = bnxt_uc_list_updated(bp, uc);
13706
13707 if (dev->flags & IFF_BROADCAST)
13708 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13709 if (dev->flags & IFF_ALLMULTI) {
13710 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13711 vnic->mc_list_count = 0;
13712 } else if (dev->flags & IFF_MULTICAST) {
13713 mc_update = bnxt_mc_list_updated(bp, &mask, mc);
13714 }
13715
13716 if (mask != vnic->rx_mask || uc_update || mc_update) {
13717 vnic->rx_mask = mask;
13718
13719 bnxt_cfg_rx_mode(bp, uc, uc_update);
13720 }
13721 }
13722
13723 static int bnxt_cfg_rx_mode(struct bnxt *bp, struct netdev_hw_addr_list *uc,
13724 bool uc_update)
13725 {
13726 struct net_device *dev = bp->dev;
13727 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13728 struct netdev_hw_addr *ha;
13729 int i, off = 0, rc;
13730
13731 if (!uc_update)
13732 goto skip_uc;
13733
13734 for (i = 1; i < vnic->uc_filter_count; i++) {
13735 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13736
13737 bnxt_hwrm_l2_filter_free(bp, fltr);
13738 bnxt_del_l2_filter(bp, fltr);
13739 }
13740
13741 vnic->uc_filter_count = 1;
13742
13743 netif_addr_lock_bh(dev);
13744 if (netdev_hw_addr_list_count(uc) > (BNXT_MAX_UC_ADDRS - 1)) {
13745 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13746 } else {
13747 netdev_hw_addr_list_for_each(ha, uc) {
13748 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13749 off += ETH_ALEN;
13750 vnic->uc_filter_count++;
13751 }
13752 }
13753 netif_addr_unlock_bh(dev);
13754
13755 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13756 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13757 if (rc) {
13758 if (BNXT_VF(bp) && rc == -ENODEV) {
13759 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13760 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13761 else
13762 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13763 rc = 0;
13764 } else {
13765 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13766 }
13767 vnic->uc_filter_count = i;
13768 return rc;
13769 }
13770 }
13771 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13772 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13773
13774 skip_uc:
13775 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13776 !bnxt_promisc_ok(bp))
13777 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13778 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13779 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13780 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13781 rc);
13782 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13783 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13784 vnic->mc_list_count = 0;
13785 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13786 }
13787 if (rc)
13788 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13789 rc);
13790
13791 return rc;
13792 }
13793
13794 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13795 {
13796 #ifdef CONFIG_BNXT_SRIOV
13797 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13798 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13799
13800 /* No minimum rings were provisioned by the PF. Don't
13801 * reserve rings by default when device is down.
13802 */
13803 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13804 return true;
13805
13806 if (!netif_running(bp->dev))
13807 return false;
13808 }
13809 #endif
13810 return true;
13811 }
13812
13813 /* If the chip and firmware support RFS */
13814 static bool bnxt_rfs_supported(struct bnxt *bp)
13815 {
13816 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13817 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13818 return true;
13819 return false;
13820 }
13821 /* 212 firmware is broken for aRFS */
13822 if (BNXT_FW_MAJ(bp) == 212)
13823 return false;
13824 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13825 return true;
13826 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13827 return true;
13828 return false;
13829 }
13830
13831 /* If runtime conditions support RFS */
13832 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13833 {
13834 struct bnxt_hw_rings hwr = {0};
13835 int max_vnics, max_rss_ctxs;
13836
13837 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13838 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13839 return bnxt_rfs_supported(bp);
13840
13841 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13842 return false;
13843
13844 hwr.grp = bp->rx_nr_rings;
13845 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13846 if (new_rss_ctx)
13847 hwr.vnic++;
13848 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13849 max_vnics = bnxt_get_max_func_vnics(bp);
13850 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13851
13852 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13853 if (bp->rx_nr_rings > 1)
13854 netdev_warn(bp->dev,
13855 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13856 min(max_rss_ctxs - 1, max_vnics - 1));
13857 return false;
13858 }
13859
13860 if (!BNXT_NEW_RM(bp))
13861 return true;
13862
13863 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13864 * issue that will mess up the default VNIC if we reduce the
13865 * reservations.
13866 */
13867 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13868 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13869 return true;
13870
13871 bnxt_hwrm_reserve_rings(bp, &hwr);
13872 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13873 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13874 return true;
13875
13876 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13877 hwr.vnic = 1;
13878 hwr.rss_ctx = 0;
13879 bnxt_hwrm_reserve_rings(bp, &hwr);
13880 return false;
13881 }
13882
13883 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13884 netdev_features_t features)
13885 {
13886 struct bnxt *bp = netdev_priv(dev);
13887 netdev_features_t vlan_features;
13888
13889 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13890 features &= ~NETIF_F_NTUPLE;
13891
13892 if ((features & NETIF_F_GSO_UDP_L4) &&
13893 !(bp->flags & BNXT_FLAG_UDP_GSO_CAP) &&
13894 bp->tx_ring_size < 2 * BNXT_SW_USO_MAX_DESCS)
13895 features &= ~NETIF_F_GSO_UDP_L4;
13896
13897 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13898 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13899
13900 if (!(features & NETIF_F_GRO))
13901 features &= ~NETIF_F_GRO_HW;
13902
13903 if (features & NETIF_F_GRO_HW)
13904 features &= ~NETIF_F_LRO;
13905
13906 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13907 * turned on or off together.
13908 */
13909 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13910 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13911 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13912 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13913 else if (vlan_features)
13914 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13915 }
13916 #ifdef CONFIG_BNXT_SRIOV
13917 if (BNXT_VF(bp) && bp->vf.vlan)
13918 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13919 #endif
13920 return features;
13921 }
13922
13923 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13924 bool link_re_init, u32 flags, bool update_tpa)
13925 {
13926 bnxt_close_nic(bp, irq_re_init, link_re_init);
13927 bp->flags = flags;
13928 if (update_tpa)
13929 bnxt_set_ring_params(bp);
13930 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13931 }
13932
13933 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13934 {
13935 bool update_tpa = false, update_ntuple = false;
13936 struct bnxt *bp = netdev_priv(dev);
13937 u32 flags = bp->flags;
13938 u32 changes;
13939 int rc = 0;
13940 bool re_init = false;
13941
13942 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
13943 bnxt_min_tx_desc_cnt(bp, features));
13944
13945 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13946 if (features & NETIF_F_GRO_HW)
13947 flags |= BNXT_FLAG_GRO;
13948 else if (features & NETIF_F_LRO)
13949 flags |= BNXT_FLAG_LRO;
13950
13951 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13952 flags &= ~BNXT_FLAG_TPA;
13953
13954 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13955 flags |= BNXT_FLAG_STRIP_VLAN;
13956
13957 if (features & NETIF_F_NTUPLE)
13958 flags |= BNXT_FLAG_RFS;
13959 else
13960 bnxt_clear_usr_fltrs(bp, true);
13961
13962 changes = flags ^ bp->flags;
13963 if (changes & BNXT_FLAG_TPA) {
13964 update_tpa = true;
13965 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13966 (flags & BNXT_FLAG_TPA) == 0 ||
13967 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13968 re_init = true;
13969 }
13970
13971 if (changes & ~BNXT_FLAG_TPA)
13972 re_init = true;
13973
13974 if (changes & BNXT_FLAG_RFS)
13975 update_ntuple = true;
13976
13977 if (flags != bp->flags) {
13978 u32 old_flags = bp->flags;
13979
13980 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13981 bp->flags = flags;
13982 if (update_tpa)
13983 bnxt_set_ring_params(bp);
13984 return rc;
13985 }
13986
13987 if (update_ntuple)
13988 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13989
13990 if (re_init)
13991 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13992
13993 if (update_tpa) {
13994 bp->flags = flags;
13995 rc = bnxt_set_tpa(bp,
13996 (flags & BNXT_FLAG_TPA) ?
13997 true : false);
13998 if (rc)
13999 bp->flags = old_flags;
14000 }
14001 }
14002 return rc;
14003 }
14004
14005 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
14006 u8 **nextp)
14007 {
14008 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
14009 int hdr_count = 0;
14010 u8 *nexthdr;
14011 int start;
14012
14013 /* Check that there are at most 2 IPv6 extension headers, no
14014 * fragment header, and each is <= 64 bytes.
14015 */
14016 start = nw_off + sizeof(*ip6h);
14017 nexthdr = &ip6h->nexthdr;
14018 while (ipv6_ext_hdr(*nexthdr)) {
14019 struct ipv6_opt_hdr *hp;
14020 int hdrlen;
14021
14022 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
14023 *nexthdr == NEXTHDR_FRAGMENT)
14024 return false;
14025 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
14026 skb_headlen(skb), NULL);
14027 if (!hp)
14028 return false;
14029 if (*nexthdr == NEXTHDR_AUTH)
14030 hdrlen = ipv6_authlen(hp);
14031 else
14032 hdrlen = ipv6_optlen(hp);
14033
14034 if (hdrlen > 64)
14035 return false;
14036
14037 hdr_count++;
14038 nexthdr = &hp->nexthdr;
14039 start += hdrlen;
14040 }
14041 if (nextp) {
14042 /* Caller will check inner protocol */
14043 if (skb->encapsulation) {
14044 *nextp = nexthdr;
14045 return true;
14046 }
14047 *nextp = NULL;
14048 }
14049 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
14050 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
14051 }
14052
14053 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
14054 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
14055 {
14056 struct udphdr *uh = udp_hdr(skb);
14057 __be16 udp_port = uh->dest;
14058
14059 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
14060 udp_port != bp->vxlan_gpe_port)
14061 return false;
14062 if (skb->inner_protocol == htons(ETH_P_TEB)) {
14063 struct ethhdr *eh = inner_eth_hdr(skb);
14064
14065 switch (eh->h_proto) {
14066 case htons(ETH_P_IP):
14067 return true;
14068 case htons(ETH_P_IPV6):
14069 return bnxt_exthdr_check(bp, skb,
14070 skb_inner_network_offset(skb),
14071 NULL);
14072 }
14073 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
14074 return true;
14075 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
14076 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
14077 NULL);
14078 }
14079 return false;
14080 }
14081
14082 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
14083 {
14084 switch (l4_proto) {
14085 case IPPROTO_UDP:
14086 return bnxt_udp_tunl_check(bp, skb);
14087 case IPPROTO_IPIP:
14088 return true;
14089 case IPPROTO_GRE: {
14090 switch (skb->inner_protocol) {
14091 default:
14092 return false;
14093 case htons(ETH_P_IP):
14094 return true;
14095 case htons(ETH_P_IPV6):
14096 fallthrough;
14097 }
14098 }
14099 case IPPROTO_IPV6:
14100 /* Check ext headers of inner ipv6 */
14101 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
14102 NULL);
14103 }
14104 return false;
14105 }
14106
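/* Per-packet ndo_features_check(): keep checksum/GSO offloads only for
 * tunnel encapsulations and IPv6 extension header chains that the NIC
 * can parse; otherwise mask the CSUM/GSO features so the stack falls
 * back to software.
 */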
14107 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
14108 struct net_device *dev,
14109 netdev_features_t features)
14110 {
14111 struct bnxt *bp = netdev_priv(dev);
14112 u8 *l4_proto;
14113
14114 features = vlan_features_check(skb, features);
14115 switch (vlan_get_protocol(skb)) {
14116 case htons(ETH_P_IP):
14117 if (!skb->encapsulation)
14118 return features;
14119 l4_proto = &ip_hdr(skb)->protocol;
14120 if (bnxt_tunl_check(bp, skb, *l4_proto))
14121 return features;
14122 break;
14123 case htons(ETH_P_IPV6):
14124 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14125 &l4_proto))
14126 break;
14127 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14128 return features;
14129 break;
14130 }
14131 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14132 }
14133
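/* Read num_words 32-bit registers starting at reg_off via the
 * HWRM_DBG_READ_DIRECT command.  Firmware DMAs the data into a slice of
 * the request buffer, which is then converted to host endianness.
 */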
14134 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14135 u32 *reg_buf)
14136 {
14137 struct hwrm_dbg_read_direct_output *resp;
14138 struct hwrm_dbg_read_direct_input *req;
14139 __le32 *dbg_reg_buf;
14140 dma_addr_t mapping;
14141 int rc, i;
14142
14143 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14144 if (rc)
14145 return rc;
14146
14147 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14148 &mapping);
14149 if (!dbg_reg_buf) {
14150 rc = -ENOMEM;
14151 goto dbg_rd_reg_exit;
14152 }
14153
14154 req->host_dest_addr = cpu_to_le64(mapping);
14155
14156 resp = hwrm_req_hold(bp, req);
14157 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14158 req->read_len32 = cpu_to_le32(num_words);
14159
14160 rc = hwrm_req_send(bp, req);
14161 if (rc || resp->error_code) {
14162 rc = -EIO;
14163 goto dbg_rd_reg_exit;
14164 }
14165 for (i = 0; i < num_words; i++)
14166 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14167
14168 dbg_rd_reg_exit:
14169 hwrm_req_drop(bp, req);
14170 return rc;
14171 }
14172
14173 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14174 u32 ring_id, u32 *prod, u32 *cons)
14175 {
14176 struct hwrm_dbg_ring_info_get_output *resp;
14177 struct hwrm_dbg_ring_info_get_input *req;
14178 int rc;
14179
14180 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14181 if (rc)
14182 return rc;
14183
14184 req->ring_type = ring_type;
14185 req->fw_ring_id = cpu_to_le32(ring_id);
14186 resp = hwrm_req_hold(bp, req);
14187 rc = hwrm_req_send(bp, req);
14188 if (!rc) {
14189 *prod = le32_to_cpu(resp->producer_index);
14190 *cons = le32_to_cpu(resp->consumer_index);
14191 }
14192 hwrm_req_drop(bp, req);
14193 return rc;
14194 }
14195
14196 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14197 {
14198 struct bnxt_tx_ring_info *txr;
14199 int i = bnapi->index, j;
14200
14201 bnxt_for_each_napi_tx(j, bnapi, txr)
14202 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14203 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14204 txr->tx_cons);
14205 }
14206
14207 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14208 {
14209 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14210 int i = bnapi->index;
14211
14212 if (!rxr)
14213 return;
14214
14215 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14216 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14217 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14218 rxr->rx_sw_agg_prod);
14219 }
14220
14221 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14222 {
14223 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14224 int i = bnapi->index, j;
14225
14226 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14227 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14228 for (j = 0; j < cpr->cp_ring_count; j++) {
14229 cpr2 = &cpr->cp_ring_arr[j];
14230 if (!cpr2->bnapi)
14231 continue;
14232 netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14233 i, j, cpr2->cp_ring_struct.fw_ring_id,
14234 cpr2->cp_raw_cons);
14235 }
14236 }
14237
14238 static void bnxt_dbg_dump_states(struct bnxt *bp)
14239 {
14240 int i;
14241 struct bnxt_napi *bnapi;
14242
14243 for (i = 0; i < bp->cp_nr_rings; i++) {
14244 bnapi = bp->bnapi[i];
14245 if (netif_msg_drv(bp)) {
14246 bnxt_dump_tx_sw_state(bnapi);
14247 bnxt_dump_rx_sw_state(bnapi);
14248 bnxt_dump_cp_sw_state(bnapi);
14249 }
14250 }
14251 }
14252
14253 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14254 {
14255 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14256 struct hwrm_ring_reset_input *req;
14257 struct bnxt_napi *bnapi = rxr->bnapi;
14258 struct bnxt_cp_ring_info *cpr;
14259 u16 cp_ring_id;
14260 int rc;
14261
14262 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14263 if (rc)
14264 return rc;
14265
14266 cpr = &bnapi->cp_ring;
14267 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14268 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14269 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14270 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14271 return hwrm_req_send_silent(bp, req);
14272 }
14273
14274 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14275 {
14276 if (!silent)
14277 bnxt_dbg_dump_states(bp);
14278 if (netif_running(bp->dev)) {
14279 bnxt_close_nic(bp, !silent, false);
14280 bnxt_open_nic(bp, !silent, false);
14281 }
14282 }
14283
14284 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14285 {
14286 struct bnxt *bp = netdev_priv(dev);
14287
14288 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14289 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14290 }
14291
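/* Periodic firmware health poll: once the timer counter expires, compare
 * the heartbeat and reset-count registers against the last known values
 * and schedule a firmware exception reset if the heartbeat has stalled
 * or the reset counter has changed while the device is still present.
 */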
14292 static void bnxt_fw_health_check(struct bnxt *bp)
14293 {
14294 struct bnxt_fw_health *fw_health = bp->fw_health;
14295 struct pci_dev *pdev = bp->pdev;
14296 u32 val;
14297
14298 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14299 return;
14300
14301 /* Make sure it is enabled before checking the tmr_counter. */
14302 smp_rmb();
14303 if (fw_health->tmr_counter) {
14304 fw_health->tmr_counter--;
14305 return;
14306 }
14307
14308 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14309 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14310 fw_health->arrests++;
14311 goto fw_reset;
14312 }
14313
14314 fw_health->last_fw_heartbeat = val;
14315
14316 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14317 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14318 fw_health->discoveries++;
14319 goto fw_reset;
14320 }
14321
14322 fw_health->tmr_counter = fw_health->tmr_multiplier;
14323 return;
14324
14325 fw_reset:
14326 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14327 }
14328
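/* Periodic driver timer: queues slow-path work for stats updates,
 * firmware health checks, TC flower stats, ntuple filter expiry, PHY
 * setting retries and missed-IRQ checks, then re-arms itself while the
 * device remains open.
 */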
14329 static void bnxt_timer(struct timer_list *t)
14330 {
14331 struct bnxt *bp = timer_container_of(bp, t, timer);
14332 struct net_device *dev = bp->dev;
14333
14334 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14335 return;
14336
14337 if (atomic_read(&bp->intr_sem) != 0)
14338 goto bnxt_restart_timer;
14339
14340 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14341 bnxt_fw_health_check(bp);
14342
14343 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14344 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14345
14346 if (bnxt_tc_flower_enabled(bp))
14347 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14348
14349 #ifdef CONFIG_RFS_ACCEL
14350 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14351 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14352 #endif /*CONFIG_RFS_ACCEL*/
14353
14354 if (bp->link_info.phy_retry) {
14355 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14356 bp->link_info.phy_retry = false;
14357 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14358 } else {
14359 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14360 }
14361 }
14362
14363 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14364 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14365
14366 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14367 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14368
14369 bnxt_restart_timer:
14370 mod_timer(&bp->timer, jiffies + bp->current_interval);
14371 }
14372
14373 static void bnxt_lock_sp(struct bnxt *bp)
14374 {
14375 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14376 * set. If the device is being closed, bnxt_close() may be holding
14377 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14378 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14379 * instance lock.
14380 */
14381 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14382 netdev_lock(bp->dev);
14383 }
14384
14385 static void bnxt_unlock_sp(struct bnxt *bp)
14386 {
14387 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14388 netdev_unlock(bp->dev);
14389 }
14390
14391 /* Only called from bnxt_sp_task() */
14392 static void bnxt_reset(struct bnxt *bp, bool silent)
14393 {
14394 bnxt_lock_sp(bp);
14395 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14396 bnxt_reset_task(bp, silent);
14397 bnxt_unlock_sp(bp);
14398 }
14399
14400 /* Only called from bnxt_sp_task() */
14401 static void bnxt_rx_ring_reset(struct bnxt *bp)
14402 {
14403 int i;
14404
14405 bnxt_lock_sp(bp);
14406 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14407 bnxt_unlock_sp(bp);
14408 return;
14409 }
14410 /* Disable and flush TPA before resetting the RX ring */
14411 if (bp->flags & BNXT_FLAG_TPA)
14412 bnxt_set_tpa(bp, false);
14413 for (i = 0; i < bp->rx_nr_rings; i++) {
14414 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14415 struct bnxt_cp_ring_info *cpr;
14416 int rc;
14417
14418 if (!rxr->bnapi->in_reset)
14419 continue;
14420
14421 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14422 if (rc) {
14423 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14424 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14425 else
14426 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14427 rc);
14428 bnxt_reset_task(bp, true);
14429 break;
14430 }
14431 bnxt_free_one_rx_ring_skbs(bp, rxr);
14432 rxr->rx_prod = 0;
14433 rxr->rx_agg_prod = 0;
14434 rxr->rx_sw_agg_prod = 0;
14435 rxr->rx_next_cons = 0;
14436 rxr->bnapi->in_reset = false;
14437 bnxt_alloc_one_rx_ring(bp, i);
14438 cpr = &rxr->bnapi->cp_ring;
14439 cpr->sw_stats->rx.rx_resets++;
14440 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14441 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14442 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14443 }
14444 if (bp->flags & BNXT_FLAG_TPA)
14445 bnxt_set_tpa(bp, true);
14446 bnxt_unlock_sp(bp);
14447 }
14448
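/* Quiesce the device after a fatal firmware error: stop TX and NAPI,
 * mask and synchronize interrupts, free the IRQs and disable the PCI
 * device so no further DMA can occur.
 */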
14449 static void bnxt_fw_fatal_close(struct bnxt *bp)
14450 {
14451 bnxt_tx_disable(bp);
14452 bnxt_disable_napi(bp);
14453 bnxt_disable_int_sync(bp);
14454 bnxt_free_irq(bp);
14455 bnxt_clear_int_mode(bp);
14456 pci_disable_device(bp->pdev);
14457 }
14458
14459 static void bnxt_fw_reset_close(struct bnxt *bp)
14460 {
14461 /* When firmware is in fatal state, quiesce device and disable
14462 * bus master to prevent any potential bad DMAs before freeing
14463 * kernel memory.
14464 */
14465 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14466 u16 val = 0;
14467
14468 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14469 if (val == 0xffff)
14470 bp->fw_reset_min_dsecs = 0;
14471 bnxt_fw_fatal_close(bp);
14472 }
14473 __bnxt_close_nic(bp, true, false);
14474 bnxt_vf_reps_free(bp);
14475 bnxt_clear_int_mode(bp);
14476 bnxt_hwrm_func_drv_unrgtr(bp);
14477 if (pci_is_enabled(bp->pdev))
14478 pci_disable_device(bp->pdev);
14479 bnxt_free_ctx_mem(bp, false);
14480 }
14481
14482 static bool is_bnxt_fw_ok(struct bnxt *bp)
14483 {
14484 struct bnxt_fw_health *fw_health = bp->fw_health;
14485 bool no_heartbeat = false, has_reset = false;
14486 u32 val;
14487
14488 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14489 if (val == fw_health->last_fw_heartbeat)
14490 no_heartbeat = true;
14491
14492 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14493 if (val != fw_health->last_fw_reset_cnt)
14494 has_reset = true;
14495
14496 if (!no_heartbeat && has_reset)
14497 return true;
14498
14499 return false;
14500 }
14501
14502 /* netdev instance lock is acquired before calling this function */
14503 static void bnxt_force_fw_reset(struct bnxt *bp)
14504 {
14505 struct bnxt_fw_health *fw_health = bp->fw_health;
14506 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14507 u32 wait_dsecs;
14508
14509 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14510 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14511 return;
14512
14513 /* we have to serialize with bnxt_refclk_read()*/
14514 if (ptp) {
14515 unsigned long flags;
14516
14517 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14518 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14519 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14520 } else {
14521 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14522 }
14523 bnxt_fw_reset_close(bp);
14524 wait_dsecs = fw_health->master_func_wait_dsecs;
14525 if (fw_health->primary) {
14526 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14527 wait_dsecs = 0;
14528 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14529 } else {
14530 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14531 wait_dsecs = fw_health->normal_func_wait_dsecs;
14532 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14533 }
14534
14535 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14536 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14537 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14538 }
14539
14540 void bnxt_fw_exception(struct bnxt *bp)
14541 {
14542 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14543 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14544 bnxt_ulp_stop(bp);
14545 bnxt_lock_sp(bp);
14546 bnxt_force_fw_reset(bp);
14547 bnxt_unlock_sp(bp);
14548 }
14549
14550 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14551 * < 0 on error.
14552 */
14553 static int bnxt_get_registered_vfs(struct bnxt *bp)
14554 {
14555 #ifdef CONFIG_BNXT_SRIOV
14556 int rc;
14557
14558 if (!BNXT_PF(bp))
14559 return 0;
14560
14561 rc = bnxt_hwrm_func_qcfg(bp);
14562 if (rc) {
14563 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14564 return rc;
14565 }
14566 if (bp->pf.registered_vfs)
14567 return bp->pf.registered_vfs;
14568 if (bp->sriov_cfg)
14569 return 1;
14570 #endif
14571 return 0;
14572 }
14573
14574 void bnxt_fw_reset(struct bnxt *bp)
14575 {
14576 bnxt_ulp_stop(bp);
14577 bnxt_lock_sp(bp);
14578 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14579 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14580 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14581 int n = 0, tmo;
14582
14583 /* we have to serialize with bnxt_refclk_read()*/
14584 if (ptp) {
14585 unsigned long flags;
14586
14587 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14588 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14589 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14590 } else {
14591 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14592 }
14593 if (bp->pf.active_vfs &&
14594 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14595 n = bnxt_get_registered_vfs(bp);
14596 if (n < 0) {
14597 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14598 n);
14599 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14600 netif_close(bp->dev);
14601 goto fw_reset_exit;
14602 } else if (n > 0) {
14603 u16 vf_tmo_dsecs = n * 10;
14604
14605 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14606 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14607 bp->fw_reset_state =
14608 BNXT_FW_RESET_STATE_POLL_VF;
14609 bnxt_queue_fw_reset_work(bp, HZ / 10);
14610 goto fw_reset_exit;
14611 }
14612 bnxt_fw_reset_close(bp);
14613 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14614 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14615 tmo = HZ / 10;
14616 } else {
14617 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14618 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14619 }
14620 bnxt_queue_fw_reset_work(bp, tmo);
14621 }
14622 fw_reset_exit:
14623 bnxt_unlock_sp(bp);
14624 }
14625
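/* On P5+ chips, look for completion rings that have work pending but
 * whose raw consumer index has not advanced since the last check.  Query
 * the ring state from firmware and account the event as a missed IRQ.
 */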
14626 static void bnxt_chk_missed_irq(struct bnxt *bp)
14627 {
14628 int i;
14629
14630 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14631 return;
14632
14633 for (i = 0; i < bp->cp_nr_rings; i++) {
14634 struct bnxt_napi *bnapi = bp->bnapi[i];
14635 struct bnxt_cp_ring_info *cpr;
14636 u32 fw_ring_id;
14637 int j;
14638
14639 if (!bnapi)
14640 continue;
14641
14642 cpr = &bnapi->cp_ring;
14643 for (j = 0; j < cpr->cp_ring_count; j++) {
14644 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14645 u32 val[2];
14646
14647 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14648 continue;
14649
14650 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14651 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14652 continue;
14653 }
14654 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14655 bnxt_dbg_hwrm_ring_info_get(bp,
14656 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14657 fw_ring_id, &val[0], &val[1]);
14658 cpr->sw_stats->cmn.missed_irqs++;
14659 }
14660 }
14661 }
14662
14663 static void bnxt_cfg_ntp_filters(struct bnxt *);
14664
14665 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14666 {
14667 struct bnxt_link_info *link_info = &bp->link_info;
14668
14669 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14670 link_info->autoneg = BNXT_AUTONEG_SPEED;
14671 if (bp->hwrm_spec_code >= 0x10201) {
14672 if (link_info->auto_pause_setting &
14673 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14674 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14675 } else {
14676 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14677 }
14678 bnxt_set_auto_speed(link_info);
14679 } else {
14680 bnxt_set_force_speed(link_info);
14681 link_info->req_duplex = link_info->duplex_setting;
14682 }
14683 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14684 link_info->req_flow_ctrl =
14685 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14686 else
14687 link_info->req_flow_ctrl = link_info->force_pause_setting;
14688 }
14689
14690 static void bnxt_fw_echo_reply(struct bnxt *bp)
14691 {
14692 struct bnxt_fw_health *fw_health = bp->fw_health;
14693 struct hwrm_func_echo_response_input *req;
14694 int rc;
14695
14696 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14697 if (rc)
14698 return;
14699 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14700 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14701 hwrm_req_send(bp, req);
14702 }
14703
14704 static void bnxt_ulp_restart(struct bnxt *bp)
14705 {
14706 bnxt_ulp_stop(bp);
14707 bnxt_ulp_start(bp);
14708 }
14709
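/* Slow-path work handler: services the deferred events queued by the
 * timer, interrupt and async-event paths (stats, link changes, filter
 * configuration, resets, firmware health) while BNXT_STATE_IN_SP_TASK
 * is set.
 */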
14710 static void bnxt_sp_task(struct work_struct *work)
14711 {
14712 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14713 struct net_device *dev = bp->dev;
14714
14715 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14716 smp_mb__after_atomic();
14717 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14718 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14719 return;
14720 }
14721
14722 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14723 bnxt_ulp_restart(bp);
14724 bnxt_reenable_sriov(bp);
14725 }
14726
14727 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14728 bnxt_cfg_ntp_filters(bp);
14729 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14730 bnxt_hwrm_exec_fwd_req(bp);
14731 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14732 netdev_info(bp->dev, "Receive PF driver unload event!\n");
14733 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14734 bnxt_hwrm_port_qstats(bp, 0);
14735 bnxt_hwrm_port_qstats_ext(bp, 0);
14736 bnxt_accumulate_all_stats(bp);
14737 }
14738
14739 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14740 int rc;
14741
14742 mutex_lock(&bp->link_lock);
14743 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14744 &bp->sp_event))
14745 bnxt_hwrm_phy_qcaps(bp);
14746
14747 rc = bnxt_update_link(bp, true);
14748 if (rc)
14749 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14750 rc);
14751
14752 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14753 &bp->sp_event))
14754 bnxt_init_ethtool_link_settings(bp);
14755 mutex_unlock(&bp->link_lock);
14756 }
14757 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14758 int rc;
14759
14760 mutex_lock(&bp->link_lock);
14761 rc = bnxt_update_phy_setting(bp);
14762 mutex_unlock(&bp->link_lock);
14763 if (rc) {
14764 netdev_warn(bp->dev, "update phy settings retry failed\n");
14765 } else {
14766 bp->link_info.phy_retry = false;
14767 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14768 }
14769 }
14770 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14771 mutex_lock(&bp->link_lock);
14772 bnxt_get_port_module_status(bp);
14773 mutex_unlock(&bp->link_lock);
14774 }
14775
14776 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14777 bnxt_tc_flow_stats_work(bp);
14778
14779 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14780 bnxt_chk_missed_irq(bp);
14781
14782 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14783 bnxt_fw_echo_reply(bp);
14784
14785 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14786 bnxt_hwmon_notify_event(bp);
14787
14788 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14789 * must be the last functions to be called before exiting.
14790 */
14791 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) {
14792 bnxt_lock_sp(bp);
14793 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14794 bnxt_cfg_rx_mode(bp, &dev->uc, true);
14795 bnxt_unlock_sp(bp);
14796 }
14797
14798 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14799 bnxt_reset(bp, false);
14800
14801 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14802 bnxt_reset(bp, true);
14803
14804 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14805 bnxt_rx_ring_reset(bp);
14806
14807 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14808 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14809 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14810 bnxt_devlink_health_fw_report(bp);
14811 else
14812 bnxt_fw_reset(bp);
14813 }
14814
14815 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14816 if (!is_bnxt_fw_ok(bp))
14817 bnxt_devlink_health_fw_report(bp);
14818 }
14819
14820 smp_mb__before_atomic();
14821 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14822 }
14823
14824 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14825 int *max_cp);
14826
14827 /* Under netdev instance lock */
14828 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14829 int tx_xdp)
14830 {
14831 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14832 struct bnxt_hw_rings hwr = {0};
14833 int rx_rings = rx;
14834 int rc;
14835
14836 if (tcs)
14837 tx_sets = tcs;
14838
14839 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14840
14841 if (max_rx < rx_rings)
14842 return -ENOMEM;
14843
14844 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14845 rx_rings <<= 1;
14846
14847 hwr.rx = rx_rings;
14848 hwr.tx = tx * tx_sets + tx_xdp;
14849 if (max_tx < hwr.tx)
14850 return -ENOMEM;
14851
14852 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14853
14854 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14855 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14856 if (max_cp < hwr.cp)
14857 return -ENOMEM;
14858 hwr.stat = hwr.cp;
14859 if (BNXT_NEW_RM(bp)) {
14860 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14861 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14862 hwr.grp = rx;
14863 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14864 }
14865 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14866 hwr.cp_p5 = hwr.tx + rx;
14867 rc = bnxt_hwrm_check_rings(bp, &hwr);
14868 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14869 if (!bnxt_ulp_registered(bp->edev[BNXT_AUXDEV_RDMA])) {
14870 hwr.cp += bnxt_get_ulp_msix_num(bp);
14871 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14872 }
14873 if (hwr.cp > bp->total_irqs) {
14874 int total_msix = bnxt_change_msix(bp, hwr.cp);
14875
14876 if (total_msix < hwr.cp) {
14877 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14878 hwr.cp, total_msix);
14879 rc = -ENOSPC;
14880 }
14881 }
14882 }
14883 return rc;
14884 }
14885
14886 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14887 {
14888 if (bp->bar2) {
14889 pci_iounmap(pdev, bp->bar2);
14890 bp->bar2 = NULL;
14891 }
14892
14893 if (bp->bar1) {
14894 pci_iounmap(pdev, bp->bar1);
14895 bp->bar1 = NULL;
14896 }
14897
14898 if (bp->bar0) {
14899 pci_iounmap(pdev, bp->bar0);
14900 bp->bar0 = NULL;
14901 }
14902 }
14903
14904 static void bnxt_cleanup_pci(struct bnxt *bp)
14905 {
14906 bnxt_unmap_bars(bp, bp->pdev);
14907 pci_release_regions(bp->pdev);
14908 if (pci_is_enabled(bp->pdev))
14909 pci_disable_device(bp->pdev);
14910 }
14911
14912 static void bnxt_init_dflt_coal(struct bnxt *bp)
14913 {
14914 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14915 struct bnxt_coal *coal;
14916 u16 flags = 0;
14917
14918 if (coal_cap->cmpl_params &
14919 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14920 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14921
14922 /* Tick values in micro seconds.
14923 * 1 coal_buf x bufs_per_record = 1 completion record.
14924 */
14925 coal = &bp->rx_coal;
14926 coal->coal_ticks = 10;
14927 coal->coal_bufs = 30;
14928 coal->coal_ticks_irq = 1;
14929 coal->coal_bufs_irq = 2;
14930 coal->idle_thresh = 50;
14931 coal->bufs_per_record = 2;
14932 coal->budget = 64; /* NAPI budget */
14933 coal->flags = flags;
14934
14935 coal = &bp->tx_coal;
14936 coal->coal_ticks = 28;
14937 coal->coal_bufs = 30;
14938 coal->coal_ticks_irq = 2;
14939 coal->coal_bufs_irq = 2;
14940 coal->bufs_per_record = 1;
14941 coal->flags = flags;
14942
14943 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14944 }
14945
14946 /* FW that pre-reserves 1 VNIC per function */
14947 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14948 {
14949 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14950
14951 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14952 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14953 return true;
14954 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14955 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14956 return true;
14957 return false;
14958 }
14959
14960 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14961 {
14962 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14963 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14964 int rc;
14965
14966 bp->max_pfcwd_tmo_ms = 0;
14967 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14968 if (rc)
14969 return;
14970 resp = hwrm_req_hold(bp, req);
14971 rc = hwrm_req_send_silent(bp, req);
14972 if (!rc)
14973 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14974 hwrm_req_drop(bp, req);
14975 }
14976
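/* Phase 1 of firmware init: query the HWRM interface version (going
 * through the recovery path if firmware is unresponsive after an FLR),
 * read the NVM config version, reset the function and set the firmware
 * clock.
 */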
14977 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14978 {
14979 int rc;
14980
14981 bp->fw_cap = 0;
14982 rc = bnxt_hwrm_ver_get(bp);
14983 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14984 * so wait before continuing with recovery.
14985 */
14986 if (rc)
14987 msleep(100);
14988 bnxt_try_map_fw_health_reg(bp);
14989 if (rc) {
14990 rc = bnxt_try_recover_fw(bp);
14991 if (rc)
14992 return rc;
14993 rc = bnxt_hwrm_ver_get(bp);
14994 if (rc)
14995 return rc;
14996 }
14997
14998 bnxt_nvm_cfg_ver_get(bp);
14999
15000 rc = bnxt_hwrm_func_reset(bp);
15001 if (rc)
15002 return -ENODEV;
15003
15004 bnxt_hwrm_fw_set_time(bp);
15005 return 0;
15006 }
15007
15008 static int bnxt_fw_init_one_p2(struct bnxt *bp)
15009 {
15010 int rc;
15011
15012 /* Get the MAX capabilities for this function */
15013 rc = bnxt_hwrm_func_qcaps(bp);
15014 if (rc) {
15015 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
15016 rc);
15017 return -ENODEV;
15018 }
15019
15020 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
15021 if (rc)
15022 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
15023 rc);
15024
15025 if (bnxt_alloc_fw_health(bp)) {
15026 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
15027 } else {
15028 rc = bnxt_hwrm_error_recovery_qcfg(bp);
15029 if (rc)
15030 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
15031 rc);
15032 }
15033
15034 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
15035 if (rc)
15036 return -ENODEV;
15037
15038 rc = bnxt_alloc_crash_dump_mem(bp);
15039 if (rc)
15040 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
15041 rc);
15042 if (!rc) {
15043 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
15044 if (rc) {
15045 bnxt_free_crash_dump_mem(bp);
15046 netdev_warn(bp->dev,
15047 "hwrm crash dump mem failure rc: %d\n", rc);
15048 }
15049 }
15050
15051 if (bnxt_fw_pre_resv_vnics(bp))
15052 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
15053
15054 bnxt_hwrm_pfcwd_qcaps(bp);
15055 bnxt_hwrm_func_qcfg(bp);
15056 bnxt_hwrm_vnic_qcaps(bp);
15057 bnxt_hwrm_port_led_qcaps(bp);
15058 bnxt_ethtool_init(bp);
15059 if (bp->fw_cap & BNXT_FW_CAP_PTP)
15060 __bnxt_hwrm_ptp_qcfg(bp);
15061 bnxt_dcb_init(bp);
15062 bnxt_hwmon_init(bp);
15063 return 0;
15064 }
15065
15066 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
15067 {
15068 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
15069 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
15070 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
15071 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
15072 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
15073 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
15074 bp->rss_hash_delta = bp->rss_hash_cfg;
15075 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
15076 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
15077 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
15078 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
15079 }
15080 }
15081
15082 static void bnxt_set_dflt_rfs(struct bnxt *bp)
15083 {
15084 struct net_device *dev = bp->dev;
15085
15086 dev->hw_features &= ~NETIF_F_NTUPLE;
15087 dev->features &= ~NETIF_F_NTUPLE;
15088 bp->flags &= ~BNXT_FLAG_RFS;
15089 if (bnxt_rfs_supported(bp)) {
15090 dev->hw_features |= NETIF_F_NTUPLE;
15091 if (bnxt_rfs_capable(bp, false)) {
15092 bp->flags |= BNXT_FLAG_RFS;
15093 dev->features |= NETIF_F_NTUPLE;
15094 }
15095 }
15096 }
15097
15098 static void bnxt_fw_init_one_p3(struct bnxt *bp)
15099 {
15100 struct pci_dev *pdev = bp->pdev;
15101
15102 bnxt_set_dflt_rss_hash_type(bp);
15103 bnxt_set_dflt_rfs(bp);
15104
15105 bnxt_get_wol_settings(bp);
15106 if (bp->flags & BNXT_FLAG_WOL_CAP)
15107 device_set_wakeup_enable(&pdev->dev, bp->wol);
15108 else
15109 device_set_wakeup_capable(&pdev->dev, false);
15110
15111 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
15112 bnxt_hwrm_coal_params_qcaps(bp);
15113 }
15114
15115 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
15116
15117 int bnxt_fw_init_one(struct bnxt *bp)
15118 {
15119 int rc;
15120
15121 rc = bnxt_fw_init_one_p1(bp);
15122 if (rc) {
15123 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15124 return rc;
15125 }
15126 rc = bnxt_fw_init_one_p2(bp);
15127 if (rc) {
15128 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15129 return rc;
15130 }
15131 rc = bnxt_probe_phy(bp, false);
15132 if (rc)
15133 return rc;
15134 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15135 if (rc)
15136 return rc;
15137
15138 bnxt_fw_init_one_p3(bp);
15139 return 0;
15140 }
15141
15142 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15143 {
15144 struct bnxt_fw_health *fw_health = bp->fw_health;
15145 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15146 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15147 u32 reg_type, reg_off, delay_msecs;
15148
15149 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15150 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15151 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15152 switch (reg_type) {
15153 case BNXT_FW_HEALTH_REG_TYPE_CFG:
15154 pci_write_config_dword(bp->pdev, reg_off, val);
15155 break;
15156 case BNXT_FW_HEALTH_REG_TYPE_GRC:
15157 writel(reg_off & BNXT_GRC_BASE_MASK,
15158 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15159 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15160 fallthrough;
15161 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15162 writel(val, bp->bar0 + reg_off);
15163 break;
15164 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15165 writel(val, bp->bar1 + reg_off);
15166 break;
15167 }
15168 if (delay_msecs) {
15169 pci_read_config_dword(bp->pdev, 0, &val);
15170 msleep(delay_msecs);
15171 }
15172 }
15173
15174 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15175 {
15176 struct hwrm_func_qcfg_output *resp;
15177 struct hwrm_func_qcfg_input *req;
15178 bool result = true; /* firmware will enforce if unknown */
15179
15180 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15181 return result;
15182
15183 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15184 return result;
15185
15186 req->fid = cpu_to_le16(0xffff);
15187 resp = hwrm_req_hold(bp, req);
15188 if (!hwrm_req_send(bp, req))
15189 result = !!(le16_to_cpu(resp->flags) &
15190 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15191 hwrm_req_drop(bp, req);
15192 return result;
15193 }
15194
15195 static void bnxt_reset_all(struct bnxt *bp)
15196 {
15197 struct bnxt_fw_health *fw_health = bp->fw_health;
15198 int i, rc;
15199
15200 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15201 bnxt_fw_reset_via_optee(bp);
15202 bp->fw_reset_timestamp = jiffies;
15203 return;
15204 }
15205
15206 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15207 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15208 bnxt_fw_reset_writel(bp, i);
15209 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15210 struct hwrm_fw_reset_input *req;
15211
15212 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15213 if (!rc) {
15214 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15215 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15216 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15217 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15218 rc = hwrm_req_send(bp, req);
15219 }
15220 if (rc != -ENODEV)
15221 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15222 }
15223 bp->fw_reset_timestamp = jiffies;
15224 }
15225
15226 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15227 {
15228 return time_after(jiffies, bp->fw_reset_timestamp +
15229 (bp->fw_reset_max_dsecs * HZ / 10));
15230 }
15231
15232 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15233 {
15234 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15235 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15236 bnxt_dl_health_fw_status_update(bp, false);
15237 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15238 netif_close(bp->dev);
15239 }
15240
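/* Firmware reset state machine, run from a delayed work item.  It steps
 * through the BNXT_FW_RESET_STATE_* states: wait for VFs to unregister,
 * close the device, optionally trigger the reset, poll for firmware to
 * come back up, re-enable the PCI device and finally reopen the NIC and
 * restart the ULPs.
 */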
15241 static void bnxt_fw_reset_task(struct work_struct *work)
15242 {
15243 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15244 int rc = 0;
15245
15246 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15247 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15248 return;
15249 }
15250
15251 switch (bp->fw_reset_state) {
15252 case BNXT_FW_RESET_STATE_POLL_VF: {
15253 int n = bnxt_get_registered_vfs(bp);
15254 int tmo;
15255
15256 if (n < 0) {
15257 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15258 n, jiffies_to_msecs(jiffies -
15259 bp->fw_reset_timestamp));
15260 goto fw_reset_abort;
15261 } else if (n > 0) {
15262 if (bnxt_fw_reset_timeout(bp)) {
15263 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15264 bp->fw_reset_state = 0;
15265 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15266 n);
15267 goto ulp_start;
15268 }
15269 bnxt_queue_fw_reset_work(bp, HZ / 10);
15270 return;
15271 }
15272 bp->fw_reset_timestamp = jiffies;
15273 netdev_lock(bp->dev);
15274 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15275 bnxt_fw_reset_abort(bp, rc);
15276 netdev_unlock(bp->dev);
15277 goto ulp_start;
15278 }
15279 bnxt_fw_reset_close(bp);
15280 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15281 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15282 tmo = HZ / 10;
15283 } else {
15284 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15285 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15286 }
15287 netdev_unlock(bp->dev);
15288 bnxt_queue_fw_reset_work(bp, tmo);
15289 return;
15290 }
15291 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15292 u32 val;
15293
15294 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15295 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15296 !bnxt_fw_reset_timeout(bp)) {
15297 bnxt_queue_fw_reset_work(bp, HZ / 5);
15298 return;
15299 }
15300
15301 if (!bp->fw_health->primary) {
15302 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15303
15304 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15305 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15306 return;
15307 }
15308 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15309 }
15310 fallthrough;
15311 case BNXT_FW_RESET_STATE_RESET_FW:
15312 bnxt_reset_all(bp);
15313 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15314 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15315 return;
15316 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15317 bnxt_inv_fw_health_reg(bp);
15318 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15319 !bp->fw_reset_min_dsecs) {
15320 u16 val;
15321
15322 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15323 if (val == 0xffff) {
15324 if (bnxt_fw_reset_timeout(bp)) {
15325 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15326 rc = -ETIMEDOUT;
15327 goto fw_reset_abort;
15328 }
15329 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15330 return;
15331 }
15332 }
15333 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15334 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15335 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15336 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15337 bnxt_dl_remote_reload(bp);
15338 if (pci_enable_device(bp->pdev)) {
15339 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15340 rc = -ENODEV;
15341 goto fw_reset_abort;
15342 }
15343 pci_set_master(bp->pdev);
15344 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15345 fallthrough;
15346 case BNXT_FW_RESET_STATE_POLL_FW:
15347 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15348 rc = bnxt_hwrm_poll(bp);
15349 if (rc) {
15350 if (bnxt_fw_reset_timeout(bp)) {
15351 netdev_err(bp->dev, "Firmware reset aborted\n");
15352 goto fw_reset_abort_status;
15353 }
15354 bnxt_queue_fw_reset_work(bp, HZ / 5);
15355 return;
15356 }
15357 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15358 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15359 fallthrough;
15360 case BNXT_FW_RESET_STATE_OPENING:
15361 while (!netdev_trylock(bp->dev)) {
15362 bnxt_queue_fw_reset_work(bp, HZ / 10);
15363 return;
15364 }
15365 rc = bnxt_open(bp->dev);
15366 if (rc) {
15367 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15368 bnxt_fw_reset_abort(bp, rc);
15369 netdev_unlock(bp->dev);
15370 goto ulp_start;
15371 }
15372
15373 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15374 bp->fw_health->enabled) {
15375 bp->fw_health->last_fw_reset_cnt =
15376 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15377 }
15378 bp->fw_reset_state = 0;
15379 /* Make sure fw_reset_state is 0 before clearing the flag */
15380 smp_mb__before_atomic();
15381 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15382 bnxt_ptp_reapply_pps(bp);
15383 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15384 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15385 bnxt_dl_health_fw_recovery_done(bp);
15386 bnxt_dl_health_fw_status_update(bp, true);
15387 }
15388 netdev_unlock(bp->dev);
15389 bnxt_ulp_start(bp);
15390 bnxt_reenable_sriov(bp);
15391 netdev_lock(bp->dev);
15392 bnxt_vf_reps_alloc(bp);
15393 bnxt_vf_reps_open(bp);
15394 netdev_unlock(bp->dev);
15395 break;
15396 }
15397 return;
15398
15399 fw_reset_abort_status:
15400 if (bp->fw_health->status_reliable ||
15401 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15402 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15403
15404 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15405 }
15406 fw_reset_abort:
15407 netdev_lock(bp->dev);
15408 bnxt_fw_reset_abort(bp, rc);
15409 netdev_unlock(bp->dev);
15410 ulp_start:
15411 if (!rc)
15412 bnxt_ulp_start(bp);
15413 }
15414
15415 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15416 {
15417 int rc;
15418 struct bnxt *bp = netdev_priv(dev);
15419
15420 SET_NETDEV_DEV(dev, &pdev->dev);
15421
15422 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15423 rc = pci_enable_device(pdev);
15424 if (rc) {
15425 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15426 goto init_err;
15427 }
15428
15429 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15430 dev_err(&pdev->dev,
15431 "Cannot find PCI device base address, aborting\n");
15432 rc = -ENODEV;
15433 goto init_err_disable;
15434 }
15435
15436 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15437 if (rc) {
15438 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15439 goto init_err_disable;
15440 }
15441
15442 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15443 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15444 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15445 rc = -EIO;
15446 goto init_err_release;
15447 }
15448
15449 pci_set_master(pdev);
15450
15451 bp->dev = dev;
15452 bp->pdev = pdev;
15453
15454 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15455 * determines the BAR size.
15456 */
15457 bp->bar0 = pci_ioremap_bar(pdev, 0);
15458 if (!bp->bar0) {
15459 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15460 rc = -ENOMEM;
15461 goto init_err_release;
15462 }
15463
15464 bp->bar2 = pci_ioremap_bar(pdev, 4);
15465 if (!bp->bar2) {
15466 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15467 rc = -ENOMEM;
15468 goto init_err_release;
15469 }
15470
15471 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15472 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15473
15474 spin_lock_init(&bp->ntp_fltr_lock);
15475 #if BITS_PER_LONG == 32
15476 spin_lock_init(&bp->db_lock);
15477 #endif
15478
15479 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15480 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15481
15482 timer_setup(&bp->timer, bnxt_timer, 0);
15483 bp->current_interval = BNXT_TIMER_INTERVAL;
15484
15485 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15486 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15487
15488 clear_bit(BNXT_STATE_OPEN, &bp->state);
15489 return 0;
15490
15491 init_err_release:
15492 bnxt_unmap_bars(bp, pdev);
15493 pci_release_regions(pdev);
15494
15495 init_err_disable:
15496 pci_disable_device(pdev);
15497
15498 init_err:
15499 return rc;
15500 }
15501
15502 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15503 {
15504 struct sockaddr *addr = p;
15505 struct bnxt *bp = netdev_priv(dev);
15506 int rc = 0;
15507
15508 netdev_assert_locked(dev);
15509
15510 if (!is_valid_ether_addr(addr->sa_data))
15511 return -EADDRNOTAVAIL;
15512
15513 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15514 return 0;
15515
15516 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15517 if (rc)
15518 return rc;
15519
15520 eth_hw_addr_set(dev, addr->sa_data);
15521 bnxt_clear_usr_fltrs(bp, true);
15522 if (netif_running(dev)) {
15523 bnxt_close_nic(bp, false, false);
15524 rc = bnxt_open_nic(bp, false, false);
15525 }
15526
15527 return rc;
15528 }
15529
15530 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15531 {
15532 struct bnxt *bp = netdev_priv(dev);
15533
15534 netdev_assert_locked(dev);
15535
15536 if (netif_running(dev))
15537 bnxt_close_nic(bp, true, false);
15538
15539 WRITE_ONCE(dev->mtu, new_mtu);
15540
15541 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15542 * program is attached. We need to set the AGG rings settings and
15543 * rx_skb_func accordingly.
15544 */
15545 if (READ_ONCE(bp->xdp_prog))
15546 bnxt_set_rx_skb_mode(bp, true);
15547
15548 bnxt_set_ring_params(bp);
15549
15550 if (netif_running(dev))
15551 return bnxt_open_nic(bp, true, false);
15552
15553 return 0;
15554 }
15555
15556 void bnxt_set_cp_rings(struct bnxt *bp, bool sh)
15557 {
15558 int tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15559
15560 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15561 tx_cp + bp->rx_nr_rings;
15562 }
15563
15564 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15565 {
15566 struct bnxt *bp = netdev_priv(dev);
15567 bool sh = false;
15568 int rc;
15569
15570 if (tc > bp->max_tc) {
15571 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15572 tc, bp->max_tc);
15573 return -EINVAL;
15574 }
15575
15576 if (bp->num_tc == tc)
15577 return 0;
15578
15579 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15580 sh = true;
15581
15582 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15583 sh, tc, bp->tx_nr_rings_xdp);
15584 if (rc)
15585 return rc;
15586
15587 /* Needs to close the device and do hw resource re-allocations */
15588 if (netif_running(bp->dev))
15589 bnxt_close_nic(bp, true, false);
15590
15591 if (tc) {
15592 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15593 netdev_set_num_tc(dev, tc);
15594 bp->num_tc = tc;
15595 } else {
15596 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15597 netdev_reset_tc(dev);
15598 bp->num_tc = 0;
15599 }
15600 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15601 bnxt_set_cp_rings(bp, sh);
15602
15603 if (netif_running(bp->dev))
15604 return bnxt_open_nic(bp, true, false);
15605
15606 return 0;
15607 }
15608
15609 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15610 void *cb_priv)
15611 {
15612 struct bnxt *bp = cb_priv;
15613
15614 if (!bnxt_tc_flower_enabled(bp) ||
15615 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15616 return -EOPNOTSUPP;
15617
15618 switch (type) {
15619 case TC_SETUP_CLSFLOWER:
15620 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15621 default:
15622 return -EOPNOTSUPP;
15623 }
15624 }
15625
15626 LIST_HEAD(bnxt_block_cb_list);
15627
15628 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15629 void *type_data)
15630 {
15631 struct bnxt *bp = netdev_priv(dev);
15632
15633 switch (type) {
15634 case TC_SETUP_BLOCK:
15635 return flow_block_cb_setup_simple(type_data,
15636 &bnxt_block_cb_list,
15637 bnxt_setup_tc_block_cb,
15638 bp, bp, true);
15639 case TC_SETUP_QDISC_MQPRIO: {
15640 struct tc_mqprio_qopt *mqprio = type_data;
15641
15642 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15643
15644 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15645 }
15646 default:
15647 return -EOPNOTSUPP;
15648 }
15649 }
15650
15651 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15652 const struct sk_buff *skb)
15653 {
15654 struct bnxt_vnic_info *vnic;
15655
15656 if (skb)
15657 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15658
15659 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15660 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15661 }
15662
15663 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15664 u32 idx)
15665 {
15666 struct hlist_head *head;
15667 int bit_id;
15668
15669 spin_lock_bh(&bp->ntp_fltr_lock);
15670 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15671 if (bit_id < 0) {
15672 spin_unlock_bh(&bp->ntp_fltr_lock);
15673 return -ENOMEM;
15674 }
15675
15676 fltr->base.sw_id = (u16)bit_id;
15677 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15678 fltr->base.flags |= BNXT_ACT_RING_DST;
15679 head = &bp->ntp_fltr_hash_tbl[idx];
15680 hlist_add_head_rcu(&fltr->base.hash, head);
15681 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15682 bnxt_insert_usr_fltr(bp, &fltr->base);
15683 bp->ntp_fltr_count++;
15684 spin_unlock_bh(&bp->ntp_fltr_lock);
15685 return 0;
15686 }
15687
15688 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15689 struct bnxt_ntuple_filter *f2)
15690 {
15691 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15692 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15693 struct flow_keys *keys1 = &f1->fkeys;
15694 struct flow_keys *keys2 = &f2->fkeys;
15695
15696 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15697 keys1->basic.ip_proto != keys2->basic.ip_proto)
15698 return false;
15699
15700 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15701 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15702 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15703 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15704 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15705 return false;
15706 } else {
15707 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15708 &keys2->addrs.v6addrs.src) ||
15709 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15710 &masks2->addrs.v6addrs.src) ||
15711 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15712 &keys2->addrs.v6addrs.dst) ||
15713 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15714 &masks2->addrs.v6addrs.dst))
15715 return false;
15716 }
15717
15718 return keys1->ports.src == keys2->ports.src &&
15719 masks1->ports.src == masks2->ports.src &&
15720 keys1->ports.dst == keys2->ports.dst &&
15721 masks1->ports.dst == masks2->ports.dst &&
15722 keys1->control.flags == keys2->control.flags &&
15723 f1->l2_fltr == f2->l2_fltr;
15724 }
15725
15726 struct bnxt_ntuple_filter *
15727 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15728 struct bnxt_ntuple_filter *fltr, u32 idx)
15729 {
15730 struct bnxt_ntuple_filter *f;
15731 struct hlist_head *head;
15732
15733 head = &bp->ntp_fltr_hash_tbl[idx];
15734 hlist_for_each_entry_rcu(f, head, base.hash) {
15735 if (bnxt_fltr_match(f, fltr))
15736 return f;
15737 }
15738 return NULL;
15739 }
15740
15741 #ifdef CONFIG_RFS_ACCEL
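/* aRFS ndo_rx_flow_steer() handler: dissect the flow, look up the
 * matching L2 filter, and insert an ntuple filter steering the flow to
 * rxq_index.  The HWRM programming is deferred to bnxt_sp_task() via
 * BNXT_RX_NTP_FLTR_SP_EVENT.
 */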
15742 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15743 u16 rxq_index, u32 flow_id)
15744 {
15745 struct bnxt *bp = netdev_priv(dev);
15746 struct bnxt_ntuple_filter *fltr, *new_fltr;
15747 struct flow_keys *fkeys;
15748 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15749 struct bnxt_l2_filter *l2_fltr;
15750 int rc = 0, idx;
15751 u32 flags;
15752
15753 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15754 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15755 atomic_inc(&l2_fltr->refcnt);
15756 } else {
15757 struct bnxt_l2_key key;
15758
15759 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15760 key.vlan = 0;
15761 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15762 if (!l2_fltr)
15763 return -EINVAL;
15764 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15765 bnxt_del_l2_filter(bp, l2_fltr);
15766 return -EINVAL;
15767 }
15768 }
15769 new_fltr = kzalloc_obj(*new_fltr, GFP_ATOMIC);
15770 if (!new_fltr) {
15771 bnxt_del_l2_filter(bp, l2_fltr);
15772 return -ENOMEM;
15773 }
15774
15775 fkeys = &new_fltr->fkeys;
15776 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15777 rc = -EPROTONOSUPPORT;
15778 goto err_free;
15779 }
15780
15781 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15782 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15783 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15784 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15785 rc = -EPROTONOSUPPORT;
15786 goto err_free;
15787 }
15788 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15789 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15790 if (bp->hwrm_spec_code < 0x10601) {
15791 rc = -EPROTONOSUPPORT;
15792 goto err_free;
15793 }
15794 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15795 }
15796 flags = fkeys->control.flags;
15797 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15798 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15799 rc = -EPROTONOSUPPORT;
15800 goto err_free;
15801 }
15802 new_fltr->l2_fltr = l2_fltr;
15803
15804 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15805 rcu_read_lock();
15806 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15807 if (fltr) {
15808 rc = fltr->base.sw_id;
15809 rcu_read_unlock();
15810 goto err_free;
15811 }
15812 rcu_read_unlock();
15813
15814 new_fltr->flow_id = flow_id;
15815 new_fltr->base.rxq = rxq_index;
15816 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15817 if (!rc) {
15818 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15819 return new_fltr->base.sw_id;
15820 }
15821
15822 err_free:
15823 bnxt_del_l2_filter(bp, l2_fltr);
15824 kfree(new_fltr);
15825 return rc;
15826 }
15827 #endif
15828
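/* Unlink an ntuple filter from the hash table, release its bitmap slot and
 * drop the reference on the associated L2 filter.  The filter memory is
 * freed after an RCU grace period so lockless readers remain safe.
 */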
15829 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15830 {
15831 spin_lock_bh(&bp->ntp_fltr_lock);
15832 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15833 spin_unlock_bh(&bp->ntp_fltr_lock);
15834 return;
15835 }
15836 hlist_del_rcu(&fltr->base.hash);
15837 bnxt_del_one_usr_fltr(bp, &fltr->base);
15838 bp->ntp_fltr_count--;
15839 spin_unlock_bh(&bp->ntp_fltr_lock);
15840 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15841 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15842 kfree_rcu(fltr, base.rcu);
15843 }
15844
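/* Slow-path worker for RFS filters: program newly inserted filters into
 * the hardware CFA table and age out filters that the stack reports as
 * expired, skipping filters marked BNXT_ACT_NO_AGING.
 */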
15845 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15846 {
15847 #ifdef CONFIG_RFS_ACCEL
15848 int i;
15849
15850 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15851 struct hlist_head *head;
15852 struct hlist_node *tmp;
15853 struct bnxt_ntuple_filter *fltr;
15854 int rc;
15855
15856 head = &bp->ntp_fltr_hash_tbl[i];
15857 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15858 bool del = false;
15859
15860 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15861 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15862 continue;
15863 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15864 fltr->flow_id,
15865 fltr->base.sw_id)) {
15866 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15867 fltr);
15868 del = true;
15869 }
15870 } else {
15871 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15872 fltr);
15873 if (rc)
15874 del = true;
15875 else
15876 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15877 }
15878
15879 if (del)
15880 bnxt_del_ntp_filter(bp, fltr);
15881 }
15882 }
15883 #endif
15884 }
15885
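/* udp_tunnel_nic callbacks: translate the tunnel type (VXLAN, GENEVE or
 * VXLAN-GPE) into the corresponding HWRM tunnel destination port command
 * and program or free the UDP port in firmware.
 */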
15886 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15887 unsigned int entry, struct udp_tunnel_info *ti)
15888 {
15889 struct bnxt *bp = netdev_priv(netdev);
15890 unsigned int cmd;
15891
15892 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15893 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15894 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15895 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15896 else
15897 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15898
15899 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15900 }
15901
15902 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15903 unsigned int entry, struct udp_tunnel_info *ti)
15904 {
15905 struct bnxt *bp = netdev_priv(netdev);
15906 unsigned int cmd;
15907
15908 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15909 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15910 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15911 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15912 else
15913 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15914
15915 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15916 }
15917
15918 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15919 .set_port = bnxt_udp_tunnel_set_port,
15920 .unset_port = bnxt_udp_tunnel_unset_port,
15921 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15922 .tables = {
15923 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15924 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15925 },
15926 }, bnxt_udp_tunnels_p7 = {
15927 .set_port = bnxt_udp_tunnel_set_port,
15928 .unset_port = bnxt_udp_tunnel_unset_port,
15929 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15930 .tables = {
15931 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15932 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15933 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15934 },
15935 };
15936
15937 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15938 struct net_device *dev, u32 filter_mask,
15939 int nlflags)
15940 {
15941 struct bnxt *bp = netdev_priv(dev);
15942
15943 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15944 nlflags, filter_mask, NULL);
15945 }
15946
15947 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15948 u16 flags, struct netlink_ext_ack *extack)
15949 {
15950 struct bnxt *bp = netdev_priv(dev);
15951 struct nlattr *attr, *br_spec;
15952 int rem, rc = 0;
15953
15954 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15955 return -EOPNOTSUPP;
15956
15957 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15958 if (!br_spec)
15959 return -EINVAL;
15960
15961 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15962 u16 mode;
15963
15964 mode = nla_get_u16(attr);
15965 if (mode == bp->br_mode)
15966 break;
15967
15968 rc = bnxt_hwrm_set_br_mode(bp, mode);
15969 if (!rc)
15970 bp->br_mode = mode;
15971 break;
15972 }
15973 return rc;
15974 }
15975
15976 int bnxt_get_port_parent_id(struct net_device *dev,
15977 struct netdev_phys_item_id *ppid)
15978 {
15979 struct bnxt *bp = netdev_priv(dev);
15980
15981 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15982 return -EOPNOTSUPP;
15983
15984 /* The PF and its VF-reps only support the switchdev framework */
15985 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15986 return -EOPNOTSUPP;
15987
15988 ppid->id_len = sizeof(bp->dsn);
15989 memcpy(ppid->id, bp->dsn, ppid->id_len);
15990
15991 return 0;
15992 }
15993
15994 static const struct net_device_ops bnxt_netdev_ops = {
15995 .ndo_open = bnxt_open,
15996 .ndo_start_xmit = bnxt_start_xmit,
15997 .ndo_stop = bnxt_close,
15998 .ndo_get_stats64 = bnxt_get_stats64,
15999 .ndo_set_rx_mode_async = bnxt_set_rx_mode,
16000 .ndo_eth_ioctl = bnxt_ioctl,
16001 .ndo_validate_addr = eth_validate_addr,
16002 .ndo_set_mac_address = bnxt_change_mac_addr,
16003 .ndo_change_mtu = bnxt_change_mtu,
16004 .ndo_fix_features = bnxt_fix_features,
16005 .ndo_set_features = bnxt_set_features,
16006 .ndo_features_check = bnxt_features_check,
16007 .ndo_tx_timeout = bnxt_tx_timeout,
16008 #ifdef CONFIG_BNXT_SRIOV
16009 .ndo_get_vf_config = bnxt_get_vf_config,
16010 .ndo_set_vf_mac = bnxt_set_vf_mac,
16011 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
16012 .ndo_set_vf_rate = bnxt_set_vf_bw,
16013 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
16014 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
16015 .ndo_set_vf_trust = bnxt_set_vf_trust,
16016 #endif
16017 .ndo_setup_tc = bnxt_setup_tc,
16018 #ifdef CONFIG_RFS_ACCEL
16019 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
16020 #endif
16021 .ndo_bpf = bnxt_xdp,
16022 .ndo_xdp_xmit = bnxt_xdp_xmit,
16023 .ndo_bridge_getlink = bnxt_bridge_getlink,
16024 .ndo_bridge_setlink = bnxt_bridge_setlink,
16025 .ndo_hwtstamp_get = bnxt_hwtstamp_get,
16026 .ndo_hwtstamp_set = bnxt_hwtstamp_set,
16027 };
16028
16029 static const struct xdp_metadata_ops bnxt_xdp_metadata_ops = {
16030 .xmo_rx_hash = bnxt_xdp_rx_hash,
16031 };
16032
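/* netdev_stat_ops callbacks: report per-queue RX/TX packet and byte counts
 * from the software mirror of the hardware ring statistics, plus base
 * counters carried over from before the last ring reconfiguration.
 */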
16033 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
16034 struct netdev_queue_stats_rx *stats)
16035 {
16036 struct bnxt *bp = netdev_priv(dev);
16037 struct bnxt_cp_ring_info *cpr;
16038 u64 *sw;
16039
16040 if (!bp->bnapi)
16041 return;
16042
16043 cpr = &bp->bnapi[i]->cp_ring;
16044 sw = cpr->stats.sw_stats;
16045
16046 stats->packets = 0;
16047 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
16048 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
16049 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
16050
16051 stats->bytes = 0;
16052 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
16053 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
16054 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
16055
16056 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
16057 stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
16058 stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
16059 }
16060
16061 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
16062 struct netdev_queue_stats_tx *stats)
16063 {
16064 struct bnxt *bp = netdev_priv(dev);
16065 struct bnxt_napi *bnapi;
16066 u64 *sw;
16067
16068 if (!bp->tx_ring)
16069 return;
16070
16071 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
16072 sw = bnapi->cp_ring.stats.sw_stats;
16073
16074 stats->packets = 0;
16075 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
16076 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
16077 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
16078
16079 stats->bytes = 0;
16080 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
16081 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
16082 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
16083 }
16084
16085 static void bnxt_get_base_stats(struct net_device *dev,
16086 struct netdev_queue_stats_rx *rx,
16087 struct netdev_queue_stats_tx *tx)
16088 {
16089 struct bnxt *bp = netdev_priv(dev);
16090
16091 rx->packets = bp->net_stats_prev.rx_packets;
16092 rx->bytes = bp->net_stats_prev.rx_bytes;
16093 rx->alloc_fail = bp->ring_drv_stats_prev.rx_total_oom_discards;
16094 rx->hw_gro_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_packets;
16095 rx->hw_gro_wire_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_wire_packets;
16096
16097 tx->packets = bp->net_stats_prev.tx_packets;
16098 tx->bytes = bp->net_stats_prev.tx_bytes;
16099 }
16100
16101 static const struct netdev_stat_ops bnxt_stat_ops = {
16102 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
16103 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
16104 .get_base_stats = bnxt_get_base_stats,
16105 };
16106
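/* Queue configuration callbacks: advertise the default RX buffer page size
 * and validate a requested rx_page_size (power of two, within the
 * supported range, and only configurable on P5_PLUS chips).
 */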
16107 static void bnxt_queue_default_qcfg(struct net_device *dev,
16108 struct netdev_queue_config *qcfg)
16109 {
16110 qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
16111 }
16112
16113 static int bnxt_validate_qcfg(struct net_device *dev,
16114 struct netdev_queue_config *qcfg,
16115 struct netlink_ext_ack *extack)
16116 {
16117 struct bnxt *bp = netdev_priv(dev);
16118
16119 /* Older chips need the MSS calculation, so a non-default rx_page_size is not supported */
16120 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
16121 qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
16122 return -EINVAL;
16123
16124 if (!is_power_of_2(qcfg->rx_page_size))
16125 return -ERANGE;
16126
16127 if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
16128 qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
16129 return -ERANGE;
16130
16131 return 0;
16132 }
16133
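/* ndo_queue_mem_alloc: build a shadow copy of RX ring @idx in @qmem with a
 * fresh page pool, XDP rxq registration, ring memory, aggregation bitmap
 * and TPA state, then prefill it with buffers.  The live ring is untouched
 * until bnxt_queue_start() swaps the new state in.
 */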
16134 static int bnxt_queue_mem_alloc(struct net_device *dev,
16135 struct netdev_queue_config *qcfg,
16136 void *qmem, int idx)
16137 {
16138 struct bnxt_rx_ring_info *rxr, *clone;
16139 struct bnxt *bp = netdev_priv(dev);
16140 struct bnxt_ring_struct *ring;
16141 int rc;
16142
16143 if (!bp->rx_ring)
16144 return -ENETDOWN;
16145
16146 rxr = &bp->rx_ring[idx];
16147 clone = qmem;
16148 memcpy(clone, rxr, sizeof(*rxr));
16149 bnxt_init_rx_ring_struct(bp, clone);
16150 bnxt_reset_rx_ring_struct(bp, clone);
16151
16152 clone->rx_prod = 0;
16153 clone->rx_agg_prod = 0;
16154 clone->rx_sw_agg_prod = 0;
16155 clone->rx_next_cons = 0;
16156 clone->need_head_pool = false;
16157 clone->rx_page_size = qcfg->rx_page_size;
16158
16159 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16160 if (rc)
16161 return rc;
16162
16163 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16164 if (rc < 0)
16165 goto err_page_pool_destroy;
16166
16167 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16168 MEM_TYPE_PAGE_POOL,
16169 clone->page_pool);
16170 if (rc)
16171 goto err_rxq_info_unreg;
16172
16173 ring = &clone->rx_ring_struct;
16174 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16175 if (rc)
16176 goto err_free_rx_ring;
16177
16178 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16179 ring = &clone->rx_agg_ring_struct;
16180 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16181 if (rc)
16182 goto err_free_rx_agg_ring;
16183
16184 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16185 if (rc)
16186 goto err_free_rx_agg_ring;
16187 }
16188
16189 if (bp->flags & BNXT_FLAG_TPA) {
16190 rc = bnxt_alloc_one_tpa_info(bp, clone);
16191 if (rc)
16192 goto err_free_tpa_info;
16193 }
16194
16195 bnxt_init_one_rx_ring_rxbd(bp, clone);
16196 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16197
16198 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16199 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16200 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16201 if (bp->flags & BNXT_FLAG_TPA)
16202 bnxt_alloc_one_tpa_info_data(bp, clone);
16203
16204 return 0;
16205
16206 err_free_tpa_info:
16207 bnxt_free_one_tpa_info(bp, clone);
16208 err_free_rx_agg_ring:
16209 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16210 err_free_rx_ring:
16211 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16212 err_rxq_info_unreg:
16213 xdp_rxq_info_unreg(&clone->xdp_rxq);
16214 err_page_pool_destroy:
16215 page_pool_destroy(clone->page_pool);
16216 page_pool_destroy(clone->head_pool);
16217 clone->page_pool = NULL;
16218 clone->head_pool = NULL;
16219 return rc;
16220 }
16221
16222 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16223 {
16224 struct bnxt_rx_ring_info *rxr = qmem;
16225 struct bnxt *bp = netdev_priv(dev);
16226 struct bnxt_ring_struct *ring;
16227
16228 bnxt_free_one_rx_ring_skbs(bp, rxr);
16229 bnxt_free_one_tpa_info(bp, rxr);
16230
16231 xdp_rxq_info_unreg(&rxr->xdp_rxq);
16232
16233 page_pool_destroy(rxr->page_pool);
16234 page_pool_destroy(rxr->head_pool);
16235 rxr->page_pool = NULL;
16236 rxr->head_pool = NULL;
16237
16238 ring = &rxr->rx_ring_struct;
16239 bnxt_free_ring(bp, &ring->ring_mem);
16240
16241 ring = &rxr->rx_agg_ring_struct;
16242 bnxt_free_ring(bp, &ring->ring_mem);
16243
16244 kfree(rxr->rx_agg_bmap);
16245 rxr->rx_agg_bmap = NULL;
16246 }
16247
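/* Transfer the ring memory pointers (page table, vmem, per-page arrays) and
 * the aggregation bitmap from @src to @dst, sanity-checking that both rings
 * were sized with identical parameters.
 */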
16248 static void bnxt_copy_rx_ring(struct bnxt *bp,
16249 struct bnxt_rx_ring_info *dst,
16250 struct bnxt_rx_ring_info *src)
16251 {
16252 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16253 struct bnxt_ring_struct *dst_ring, *src_ring;
16254 int i;
16255
16256 dst_ring = &dst->rx_ring_struct;
16257 dst_rmem = &dst_ring->ring_mem;
16258 src_ring = &src->rx_ring_struct;
16259 src_rmem = &src_ring->ring_mem;
16260
16261 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16262 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16263 WARN_ON(dst_rmem->flags != src_rmem->flags);
16264 WARN_ON(dst_rmem->depth != src_rmem->depth);
16265 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16266 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16267
16268 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16269 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16270 *dst_rmem->vmem = *src_rmem->vmem;
16271 for (i = 0; i < dst_rmem->nr_pages; i++) {
16272 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16273 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16274 }
16275
16276 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16277 return;
16278
16279 dst_ring = &dst->rx_agg_ring_struct;
16280 dst_rmem = &dst_ring->ring_mem;
16281 src_ring = &src->rx_agg_ring_struct;
16282 src_rmem = &src_ring->ring_mem;
16283
16284 dst->rx_page_size = src->rx_page_size;
16285
16286 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16287 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16288 WARN_ON(dst_rmem->flags != src_rmem->flags);
16289 WARN_ON(dst_rmem->depth != src_rmem->depth);
16290 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16291 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16292 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16293
16294 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16295 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16296 *dst_rmem->vmem = *src_rmem->vmem;
16297 for (i = 0; i < dst_rmem->nr_pages; i++) {
16298 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16299 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16300 }
16301
16302 dst->rx_agg_bmap = src->rx_agg_bmap;
16303 }
16304
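/* ndo_queue_start: adopt the shadow ring state prepared by
 * bnxt_queue_mem_alloc(), re-create the RX (and aggregation) rings in
 * firmware, restart NAPI and restore the VNIC MRU.  Any HWRM failure here
 * is unexpected and triggers a full reset.
 */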
16305 static int bnxt_queue_start(struct net_device *dev,
16306 struct netdev_queue_config *qcfg,
16307 void *qmem, int idx)
16308 {
16309 struct bnxt *bp = netdev_priv(dev);
16310 struct bnxt_rx_ring_info *rxr, *clone;
16311 struct bnxt_cp_ring_info *cpr;
16312 struct bnxt_vnic_info *vnic;
16313 struct bnxt_napi *bnapi;
16314 int i, rc;
16315 u16 mru;
16316
16317 rxr = &bp->rx_ring[idx];
16318 clone = qmem;
16319
16320 rxr->rx_prod = clone->rx_prod;
16321 rxr->rx_agg_prod = clone->rx_agg_prod;
16322 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16323 rxr->rx_next_cons = clone->rx_next_cons;
16324 rxr->rx_tpa = clone->rx_tpa;
16325 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16326 rxr->page_pool = clone->page_pool;
16327 rxr->head_pool = clone->head_pool;
16328 rxr->xdp_rxq = clone->xdp_rxq;
16329 rxr->need_head_pool = clone->need_head_pool;
16330
16331 bnxt_copy_rx_ring(bp, rxr, clone);
16332
16333 bnapi = rxr->bnapi;
16334 cpr = &bnapi->cp_ring;
16335
16336 /* All rings have been reserved and previously allocated.
16337 * Reallocating with the same parameters should never fail.
16338 */
16339 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16340 if (rc)
16341 goto err_reset;
16342
16343 if (bp->tph_mode) {
16344 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16345 if (rc)
16346 goto err_reset;
16347 }
16348
16349 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16350 if (rc)
16351 goto err_reset;
16352
16353 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16354 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16355 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16356
16357 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16358 rc = bnxt_tx_queue_start(bp, idx);
16359 if (rc)
16360 goto err_reset;
16361 }
16362
16363 bnxt_enable_rx_page_pool(rxr);
16364 napi_enable_locked(&bnapi->napi);
16365 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16366
16367 mru = bp->dev->mtu + VLAN_ETH_HLEN;
16368 for (i = 0; i < bp->nr_vnics; i++) {
16369 vnic = &bp->vnic_info[i];
16370
16371 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16372 if (rc)
16373 return rc;
16374 }
16375 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16376
16377 err_reset:
16378 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16379 rc);
16380 napi_enable_locked(&bnapi->napi);
16381 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16382 bnxt_reset_task(bp, true);
16383 return rc;
16384 }
16385
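/* ndo_queue_stop: quiesce RX queue @idx by clearing the VNIC MRU, freeing
 * the hardware rings, disabling page pool direct recycling and NAPI, and
 * saving the ring state into @qmem for later release.
 */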
16386 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16387 {
16388 struct bnxt *bp = netdev_priv(dev);
16389 struct bnxt_rx_ring_info *rxr;
16390 struct bnxt_cp_ring_info *cpr;
16391 struct bnxt_vnic_info *vnic;
16392 struct bnxt_napi *bnapi;
16393 int i;
16394
16395 for (i = 0; i < bp->nr_vnics; i++) {
16396 vnic = &bp->vnic_info[i];
16397
16398 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16399 }
16400 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16401 /* Make sure NAPI sees that the VNIC is disabled */
16402 synchronize_net();
16403 rxr = &bp->rx_ring[idx];
16404 bnapi = rxr->bnapi;
16405 cpr = &bnapi->cp_ring;
16406 cancel_work_sync(&cpr->dim.work);
16407 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16408 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16409 page_pool_disable_direct_recycling(rxr->page_pool);
16410 if (bnxt_separate_head_pool(rxr))
16411 page_pool_disable_direct_recycling(rxr->head_pool);
16412
16413 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16414 bnxt_tx_queue_stop(bp, idx);
16415
16416 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16417 * completion is handled in NAPI to guarantee no more DMA on that ring
16418 * after seeing the completion.
16419 */
16420 napi_disable_locked(&bnapi->napi);
16421
16422 if (bp->tph_mode) {
16423 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16424 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16425 }
16426 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16427
16428 memcpy(qmem, rxr, sizeof(*rxr));
16429 bnxt_init_rx_ring_struct(bp, qmem);
16430
16431 return 0;
16432 }
16433
16434 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16435 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16436 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16437 .ndo_queue_mem_free = bnxt_queue_mem_free,
16438 .ndo_queue_start = bnxt_queue_start,
16439 .ndo_queue_stop = bnxt_queue_stop,
16440 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16441 .ndo_validate_qcfg = bnxt_validate_qcfg,
16442 .supported_params = QCFG_RX_PAGE_SIZE,
16443 };
16444
16445 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16446 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16447 };
16448
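/* PCI remove entry point: tear down SR-IOV, auxiliary devices, the netdev,
 * filters, devlink, interrupts and firmware resources in roughly the
 * reverse order of probe, then release PCI resources and free the netdev.
 */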
16449 static void bnxt_remove_one(struct pci_dev *pdev)
16450 {
16451 struct net_device *dev = pci_get_drvdata(pdev);
16452 struct bnxt *bp = netdev_priv(dev);
16453
16454 if (BNXT_PF(bp))
16455 __bnxt_sriov_disable(bp);
16456
16457 bnxt_aux_devices_del(bp);
16458
16459 unregister_netdev(dev);
16460 bnxt_ptp_clear(bp);
16461
16462 bnxt_aux_devices_uninit(bp);
16463 bnxt_auxdev_id_free(bp, bp->auxdev_id);
16464
16465 bnxt_free_l2_filters(bp, true);
16466 bnxt_free_ntp_fltrs(bp, true);
16467 WARN_ON(bp->num_rss_ctx);
16468 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16469 /* Flush any pending tasks */
16470 cancel_work_sync(&bp->sp_task);
16471 cancel_delayed_work_sync(&bp->fw_reset_task);
16472 bp->sp_event = 0;
16473
16474 bnxt_dl_fw_reporters_destroy(bp);
16475 bnxt_dl_unregister(bp);
16476 bnxt_shutdown_tc(bp);
16477
16478 bnxt_clear_int_mode(bp);
16479 bnxt_hwrm_func_drv_unrgtr(bp);
16480 bnxt_free_hwrm_resources(bp);
16481 bnxt_hwmon_uninit(bp);
16482 bnxt_ethtool_free(bp);
16483 bnxt_dcb_free(bp);
16484 kfree(bp->ptp_cfg);
16485 bp->ptp_cfg = NULL;
16486 kfree(bp->fw_health);
16487 bp->fw_health = NULL;
16488 bnxt_cleanup_pci(bp);
16489 bnxt_free_ctx_mem(bp, true);
16490 bnxt_free_crash_dump_mem(bp);
16491 kfree(bp->rss_indir_tbl);
16492 bp->rss_indir_tbl = NULL;
16493 bnxt_free_port_stats(bp);
16494 free_netdev(dev);
16495 }
16496
16497 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16498 {
16499 int rc = 0;
16500 struct bnxt_link_info *link_info = &bp->link_info;
16501
16502 bp->phy_flags = 0;
16503 rc = bnxt_hwrm_phy_qcaps(bp);
16504 if (rc) {
16505 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16506 rc);
16507 return rc;
16508 }
16509 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16510 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16511 else
16512 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16513
16514 bp->mac_flags = 0;
16515 bnxt_hwrm_mac_qcaps(bp);
16516
16517 if (!fw_dflt)
16518 return 0;
16519
16520 mutex_lock(&bp->link_lock);
16521 rc = bnxt_update_link(bp, false);
16522 if (rc) {
16523 mutex_unlock(&bp->link_lock);
16524 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16525 rc);
16526 return rc;
16527 }
16528
16529 /* Older firmware does not have supported_auto_speeds, so assume
16530 * that all supported speeds can be autonegotiated.
16531 */
16532 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16533 link_info->support_auto_speeds = link_info->support_speeds;
16534
16535 bnxt_init_ethtool_link_settings(bp);
16536 mutex_unlock(&bp->link_lock);
16537 return 0;
16538 }
16539
16540 static int bnxt_get_max_irq(struct pci_dev *pdev)
16541 {
16542 u16 ctrl;
16543
16544 if (!pdev->msix_cap)
16545 return 1;
16546
16547 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16548 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16549 }
16550
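/* Compute the maximum usable RX, TX and completion rings from the firmware
 * resource limits, accounting for IRQs and stat contexts reserved for ULPs,
 * aggregation rings and Nitro A0 quirks.  On P5_PLUS chips, *max_cp reports
 * available NQs instead of completion rings.
 */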
16551 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16552 int *max_cp)
16553 {
16554 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16555 int max_ring_grps = 0, max_irq;
16556
16557 *max_tx = hw_resc->max_tx_rings;
16558 *max_rx = hw_resc->max_rx_rings;
16559 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16560 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16561 bnxt_get_ulp_msix_num_in_use(bp),
16562 hw_resc->max_stat_ctxs -
16563 bnxt_get_ulp_stat_ctxs_in_use(bp));
16564 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16565 *max_cp = min_t(int, *max_cp, max_irq);
16566 max_ring_grps = hw_resc->max_hw_ring_grps;
16567 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16568 *max_cp -= 1;
16569 *max_rx -= 2;
16570 }
16571 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16572 *max_rx >>= 1;
16573 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16574 int rc;
16575
16576 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16577 if (rc) {
16578 *max_rx = 0;
16579 *max_tx = 0;
16580 }
16581 /* On P5 chips, max_cp output param should be available NQs */
16582 *max_cp = max_irq;
16583 }
16584 *max_rx = min_t(int, *max_rx, max_ring_grps);
16585 }
16586
16587 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16588 {
16589 int rx, tx, cp;
16590
16591 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16592 *max_rx = rx;
16593 *max_tx = tx;
16594 if (!rx || !tx || !cp)
16595 return -ENOMEM;
16596
16597 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16598 }
16599
16600 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16601 bool shared)
16602 {
16603 int rc;
16604
16605 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16606 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16607 /* Not enough rings, try disabling agg rings. */
16608 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16609 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16610 if (rc) {
16611 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16612 bp->flags |= BNXT_FLAG_AGG_RINGS;
16613 return rc;
16614 }
16615 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16616 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16617 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16618 bnxt_set_ring_params(bp);
16619 }
16620
16621 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16622 int max_cp, max_stat, max_irq;
16623
16624 /* Reserve minimum resources for RoCE */
16625 max_cp = bnxt_get_max_func_cp_rings(bp);
16626 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16627 max_irq = bnxt_get_max_func_irqs(bp);
16628 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16629 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16630 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16631 return 0;
16632
16633 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16634 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16635 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16636 max_cp = min_t(int, max_cp, max_irq);
16637 max_cp = min_t(int, max_cp, max_stat);
16638 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16639 if (rc)
16640 rc = 0;
16641 }
16642 return rc;
16643 }
16644
16645 /* In initial default shared ring setting, each shared ring must have a
16646 * RX/TX ring pair.
16647 */
16648 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16649 {
16650 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16651 bp->rx_nr_rings = bp->cp_nr_rings;
16652 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16653 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16654 }
16655
16656 static void bnxt_adj_dflt_rings(struct bnxt *bp, bool sh)
16657 {
16658 if (sh)
16659 bnxt_trim_dflt_sh_rings(bp);
16660 else
16661 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16662 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16663 if (sh && READ_ONCE(bp->xdp_prog)) {
16664 bnxt_set_xdp_tx_rings(bp);
16665 bnxt_set_cp_rings(bp, true);
16666 }
16667 }
16668
16669 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16670 {
16671 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16672 int avail_msix;
16673
16674 if (!bnxt_can_reserve_rings(bp))
16675 return 0;
16676
16677 if (sh)
16678 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16679 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16680 /* Reduce default rings on multi-port cards so that total default
16681 * rings do not exceed CPU count.
16682 */
16683 if (bp->port_count > 1) {
16684 int max_rings =
16685 max_t(int, num_online_cpus() / bp->port_count, 1);
16686
16687 dflt_rings = min_t(int, dflt_rings, max_rings);
16688 }
16689 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16690 if (rc)
16691 return rc;
16692 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16693 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16694
16695 bnxt_adj_dflt_rings(bp, sh);
16696
16697 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16698 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16699 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16700
16701 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16702 bnxt_set_dflt_ulp_stat_ctxs(bp);
16703 }
16704
16705 rc = __bnxt_reserve_rings(bp);
16706 if (rc && rc != -ENODEV)
16707 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16708
16709 bnxt_adj_tx_rings(bp);
16710 if (sh)
16711 bnxt_adj_dflt_rings(bp, true);
16712
16713 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16714 if (bnxt_need_reserve_rings(bp)) {
16715 rc = __bnxt_reserve_rings(bp);
16716 if (rc && rc != -ENODEV)
16717 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16718 bnxt_adj_tx_rings(bp);
16719 }
16720 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16721 bp->rx_nr_rings++;
16722 bp->cp_nr_rings++;
16723 }
16724 if (rc) {
16725 bp->tx_nr_rings = 0;
16726 bp->rx_nr_rings = 0;
16727 }
16728 return rc;
16729 }
16730
16731 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16732 {
16733 int rc;
16734
16735 if (bp->tx_nr_rings)
16736 return 0;
16737
16738 bnxt_ulp_irq_stop(bp);
16739 bnxt_clear_int_mode(bp);
16740 rc = bnxt_set_dflt_rings(bp, true);
16741 if (rc) {
16742 if (BNXT_VF(bp) && rc == -ENODEV)
16743 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16744 else
16745 netdev_err(bp->dev, "Not enough rings available.\n");
16746 goto init_dflt_ring_err;
16747 }
16748 rc = bnxt_init_int_mode(bp);
16749 if (rc)
16750 goto init_dflt_ring_err;
16751
16752 bnxt_adj_tx_rings(bp);
16753
16754 bnxt_set_dflt_rfs(bp);
16755
16756 init_dflt_ring_err:
16757 bnxt_ulp_irq_restart(bp, rc);
16758 return rc;
16759 }
16760
16761 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16762 {
16763 int rc;
16764
16765 netdev_ops_assert_locked(bp->dev);
16766 bnxt_hwrm_func_qcaps(bp);
16767
16768 if (netif_running(bp->dev))
16769 __bnxt_close_nic(bp, true, false);
16770
16771 bnxt_ulp_irq_stop(bp);
16772 bnxt_clear_int_mode(bp);
16773 rc = bnxt_init_int_mode(bp);
16774 bnxt_ulp_irq_restart(bp, rc);
16775
16776 if (netif_running(bp->dev)) {
16777 if (rc)
16778 netif_close(bp->dev);
16779 else
16780 rc = bnxt_open_nic(bp, true, false);
16781 }
16782
16783 return rc;
16784 }
16785
16786 static int bnxt_init_mac_addr(struct bnxt *bp)
16787 {
16788 int rc = 0;
16789
16790 if (BNXT_PF(bp)) {
16791 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16792 } else {
16793 #ifdef CONFIG_BNXT_SRIOV
16794 struct bnxt_vf_info *vf = &bp->vf;
16795 bool strict_approval = true;
16796
16797 if (is_valid_ether_addr(vf->mac_addr)) {
16798 /* overwrite netdev dev_addr with admin VF MAC */
16799 eth_hw_addr_set(bp->dev, vf->mac_addr);
16800 /* Older PF driver or firmware may not approve this
16801 * correctly.
16802 */
16803 strict_approval = false;
16804 } else {
16805 eth_hw_addr_random(bp->dev);
16806 }
16807 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16808 #endif
16809 }
16810 return rc;
16811 }
16812
16813 static void bnxt_vpd_read_info(struct bnxt *bp)
16814 {
16815 struct pci_dev *pdev = bp->pdev;
16816 unsigned int vpd_size, kw_len;
16817 int pos, size;
16818 u8 *vpd_data;
16819
16820 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16821 if (IS_ERR(vpd_data)) {
16822 pci_warn(pdev, "Unable to read VPD\n");
16823 return;
16824 }
16825
16826 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16827 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16828 if (pos < 0)
16829 goto read_sn;
16830
16831 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16832 memcpy(bp->board_partno, &vpd_data[pos], size);
16833
16834 read_sn:
16835 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16836 PCI_VPD_RO_KEYWORD_SERIALNO,
16837 &kw_len);
16838 if (pos < 0)
16839 goto exit;
16840
16841 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16842 memcpy(bp->board_serialno, &vpd_data[pos], size);
16843 exit:
16844 kfree(vpd_data);
16845 }
16846
16847 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16848 {
16849 struct pci_dev *pdev = bp->pdev;
16850 u64 qword;
16851
16852 qword = pci_get_dsn(pdev);
16853 if (!qword) {
16854 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16855 return -EOPNOTSUPP;
16856 }
16857
16858 put_unaligned_le64(qword, dsn);
16859
16860 bp->flags |= BNXT_FLAG_DSN_VALID;
16861 return 0;
16862 }
16863
16864 static int bnxt_map_db_bar(struct bnxt *bp)
16865 {
16866 if (!bp->db_size)
16867 return -ENODEV;
16868 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16869 if (!bp->bar1)
16870 return -ENOMEM;
16871 return 0;
16872 }
16873
16874 void bnxt_print_device_info(struct bnxt *bp)
16875 {
16876 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16877 board_info[bp->board_idx].name,
16878 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16879
16880 pcie_print_link_status(bp->pdev);
16881 }
16882
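/* PCI probe entry point: allocate the netdev, map BARs, bring up the HWRM
 * channel and query firmware capabilities, set up features, rings and
 * interrupts, then register the netdev, devlink and auxiliary devices.
 */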
16883 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16884 {
16885 struct bnxt_hw_resc *hw_resc;
16886 struct net_device *dev;
16887 struct bnxt *bp;
16888 int rc, max_irqs;
16889
16890 if (pci_is_bridge(pdev))
16891 return -ENODEV;
16892
16893 if (!pdev->msix_cap) {
16894 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16895 return -ENODEV;
16896 }
16897
16898 /* Clear any DMA transactions left pending by the crashed kernel
16899 * while loading the driver in the kdump capture kernel.
16900 */
16901 if (is_kdump_kernel()) {
16902 pci_clear_master(pdev);
16903 pcie_flr(pdev);
16904 }
16905
16906 max_irqs = bnxt_get_max_irq(pdev);
16907 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16908 max_irqs);
16909 if (!dev)
16910 return -ENOMEM;
16911
16912 bp = netdev_priv(dev);
16913 bp->board_idx = ent->driver_data;
16914 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16915 bnxt_set_max_func_irqs(bp, max_irqs);
16916
16917 if (bnxt_vf_pciid(bp->board_idx))
16918 bp->flags |= BNXT_FLAG_VF;
16919
16920 /* No devlink port registration in case of a VF */
16921 if (BNXT_PF(bp))
16922 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16923
16924 rc = bnxt_init_board(pdev, dev);
16925 if (rc < 0)
16926 goto init_err_free;
16927
16928 dev->netdev_ops = &bnxt_netdev_ops;
16929 dev->xdp_metadata_ops = &bnxt_xdp_metadata_ops;
16930 dev->stat_ops = &bnxt_stat_ops;
16931 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16932 dev->ethtool_ops = &bnxt_ethtool_ops;
16933 pci_set_drvdata(pdev, dev);
16934
16935 rc = bnxt_alloc_hwrm_resources(bp);
16936 if (rc)
16937 goto init_err_pci_clean;
16938
16939 mutex_init(&bp->hwrm_cmd_lock);
16940 mutex_init(&bp->link_lock);
16941
16942 rc = bnxt_fw_init_one_p1(bp);
16943 if (rc)
16944 goto init_err_pci_clean;
16945
16946 if (BNXT_PF(bp))
16947 bnxt_vpd_read_info(bp);
16948
16949 if (BNXT_CHIP_P5_PLUS(bp)) {
16950 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16951 if (BNXT_CHIP_P7(bp))
16952 bp->flags |= BNXT_FLAG_CHIP_P7;
16953 }
16954
16955 rc = bnxt_alloc_rss_indir_tbl(bp);
16956 if (rc)
16957 goto init_err_pci_clean;
16958
16959 rc = bnxt_fw_init_one_p2(bp);
16960 if (rc)
16961 goto init_err_pci_clean;
16962
16963 rc = bnxt_map_db_bar(bp);
16964 if (rc) {
16965 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16966 rc);
16967 goto init_err_pci_clean;
16968 }
16969
16970 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16971 NETIF_F_TSO | NETIF_F_TSO6 |
16972 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16973 NETIF_F_GSO_IPXIP4 |
16974 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16975 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16976 NETIF_F_RXCSUM | NETIF_F_GRO;
16977 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16978
16979 if (BNXT_SUPPORTS_TPA(bp))
16980 dev->hw_features |= NETIF_F_LRO;
16981
16982 dev->hw_enc_features =
16983 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16984 NETIF_F_TSO | NETIF_F_TSO6 |
16985 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16986 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16987 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16988 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16989 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16990 if (bp->flags & BNXT_FLAG_CHIP_P7)
16991 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16992 else
16993 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16994
16995 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16996 NETIF_F_GSO_GRE_CSUM;
16997 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16998 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16999 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
17000 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
17001 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
17002 if (BNXT_SUPPORTS_TPA(bp))
17003 dev->hw_features |= NETIF_F_GRO_HW;
17004 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
17005 if (dev->features & NETIF_F_GRO_HW)
17006 dev->features &= ~NETIF_F_LRO;
17007 dev->priv_flags |= IFF_UNICAST_FLT;
17008
17009 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
17010 if (!(bp->flags & BNXT_FLAG_UDP_GSO_CAP)) {
17011 u16 max_segs = BNXT_SW_USO_MAX_SEGS;
17012
17013 if (bp->tso_max_segs)
17014 max_segs = min_t(u16, max_segs, bp->tso_max_segs);
17015 netif_set_tso_max_segs(dev, max_segs);
17016 } else if (bp->tso_max_segs) {
17017 netif_set_tso_max_segs(dev, bp->tso_max_segs);
17018 }
17019
17020 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
17021 NETDEV_XDP_ACT_RX_SG;
17022
17023 #ifdef CONFIG_BNXT_SRIOV
17024 init_waitqueue_head(&bp->sriov_cfg_wait);
17025 #endif
17026 if (BNXT_SUPPORTS_TPA(bp)) {
17027 bp->gro_func = bnxt_gro_func_5730x;
17028 if (BNXT_CHIP_P4(bp))
17029 bp->gro_func = bnxt_gro_func_5731x;
17030 else if (BNXT_CHIP_P5_PLUS(bp))
17031 bp->gro_func = bnxt_gro_func_5750x;
17032 }
17033 if (!BNXT_CHIP_P4_PLUS(bp))
17034 bp->flags |= BNXT_FLAG_DOUBLE_DB;
17035
17036 rc = bnxt_init_mac_addr(bp);
17037 if (rc) {
17038 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
17039 rc = -EADDRNOTAVAIL;
17040 goto init_err_pci_clean;
17041 }
17042
17043 if (BNXT_PF(bp)) {
17044 /* Read the adapter's DSN to use as the eswitch switch_id */
17045 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
17046 }
17047
17048 /* MTU range: 60 - FW defined max */
17049 dev->min_mtu = ETH_ZLEN;
17050 dev->max_mtu = bp->max_mtu;
17051
17052 rc = bnxt_probe_phy(bp, true);
17053 if (rc)
17054 goto init_err_pci_clean;
17055
17056 hw_resc = &bp->hw_resc;
17057 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
17058 BNXT_L2_FLTR_MAX_FLTR;
17059 /* Older firmware may not report these filters properly */
17060 if (bp->max_fltr < BNXT_MAX_FLTR)
17061 bp->max_fltr = BNXT_MAX_FLTR;
17062 bnxt_init_l2_fltr_tbl(bp);
17063 __bnxt_set_rx_skb_mode(bp, false);
17064 bnxt_set_tpa_flags(bp);
17065 bnxt_init_ring_params(bp);
17066 bnxt_set_ring_params(bp);
17067 mutex_init(&bp->auxdev_lock);
17068 if (!bnxt_auxdev_id_alloc(bp))
17069 bnxt_aux_devices_init(bp);
17070 rc = bnxt_set_dflt_rings(bp, true);
17071 if (rc) {
17072 if (BNXT_VF(bp) && rc == -ENODEV) {
17073 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
17074 } else {
17075 netdev_err(bp->dev, "Not enough rings available.\n");
17076 rc = -ENOMEM;
17077 }
17078 goto init_err_pci_clean;
17079 }
17080
17081 bnxt_fw_init_one_p3(bp);
17082
17083 bnxt_init_dflt_coal(bp);
17084
17085 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
17086 bp->flags |= BNXT_FLAG_STRIP_VLAN;
17087
17088 rc = bnxt_init_int_mode(bp);
17089 if (rc)
17090 goto init_err_pci_clean;
17091
17092 /* No TC has been set yet and rings may have been trimmed due to
17093 * limited MSIX, so we re-initialize the TX rings per TC.
17094 */
17095 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
17096
17097 if (BNXT_PF(bp)) {
17098 if (!bnxt_pf_wq) {
17099 bnxt_pf_wq =
17100 create_singlethread_workqueue("bnxt_pf_wq");
17101 if (!bnxt_pf_wq) {
17102 dev_err(&pdev->dev, "Unable to create workqueue.\n");
17103 rc = -ENOMEM;
17104 goto init_err_pci_clean;
17105 }
17106 }
17107 rc = bnxt_init_tc(bp);
17108 if (rc)
17109 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
17110 rc);
17111 }
17112
17113 bnxt_inv_fw_health_reg(bp);
17114 rc = bnxt_dl_register(bp);
17115 if (rc)
17116 goto init_err_dl;
17117
17118 INIT_LIST_HEAD(&bp->usr_fltr_list);
17119
17120 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
17121 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
17122
17123 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
17124 if (BNXT_SUPPORTS_QUEUE_API(bp))
17125 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
17126 dev->netmem_tx = true;
17127
17128 rc = register_netdev(dev);
17129 if (rc)
17130 goto init_err_cleanup;
17131
17132 bnxt_dl_fw_reporters_create(bp);
17133
17134 bnxt_aux_devices_add(bp);
17135
17136 bnxt_print_device_info(bp);
17137
17138 pci_save_state(pdev);
17139
17140 return 0;
17141 init_err_cleanup:
17142 bnxt_aux_devices_uninit(bp);
17143 bnxt_auxdev_id_free(bp, bp->auxdev_id);
17144 bnxt_dl_unregister(bp);
17145 init_err_dl:
17146 bnxt_shutdown_tc(bp);
17147 bnxt_clear_int_mode(bp);
17148
17149 init_err_pci_clean:
17150 bnxt_hwrm_func_drv_unrgtr(bp);
17151 bnxt_ptp_clear(bp);
17152 kfree(bp->ptp_cfg);
17153 bp->ptp_cfg = NULL;
17154 bnxt_free_hwrm_resources(bp);
17155 bnxt_hwmon_uninit(bp);
17156 bnxt_ethtool_free(bp);
17157 kfree(bp->fw_health);
17158 bp->fw_health = NULL;
17159 bnxt_cleanup_pci(bp);
17160 bnxt_free_ctx_mem(bp, true);
17161 bnxt_free_crash_dump_mem(bp);
17162 kfree(bp->rss_indir_tbl);
17163 bp->rss_indir_tbl = NULL;
17164
17165 init_err_free:
17166 free_netdev(dev);
17167 return rc;
17168 }
17169
17170 static void bnxt_shutdown(struct pci_dev *pdev)
17171 {
17172 struct net_device *dev = pci_get_drvdata(pdev);
17173 struct bnxt *bp;
17174
17175 if (!dev)
17176 return;
17177
17178 rtnl_lock();
17179 netdev_lock(dev);
17180 bp = netdev_priv(dev);
17181 if (!bp)
17182 goto shutdown_exit;
17183
17184 if (netif_running(dev))
17185 netif_close(dev);
17186
17187 if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17188 pcie_flr(pdev);
17189 goto shutdown_exit;
17190 }
17191 bnxt_ptp_clear(bp);
17192 bnxt_clear_int_mode(bp);
17193 pci_disable_device(pdev);
17194
17195 if (system_state == SYSTEM_POWER_OFF) {
17196 pci_wake_from_d3(pdev, bp->wol);
17197 pci_set_power_state(pdev, PCI_D3hot);
17198 }
17199
17200 shutdown_exit:
17201 netdev_unlock(dev);
17202 rtnl_unlock();
17203 }
17204
17205 #ifdef CONFIG_PM_SLEEP
17206 static int bnxt_suspend(struct device *device)
17207 {
17208 struct net_device *dev = dev_get_drvdata(device);
17209 struct bnxt *bp = netdev_priv(dev);
17210 int rc = 0;
17211
17212 bnxt_ulp_stop(bp);
17213
17214 netdev_lock(dev);
17215 if (netif_running(dev)) {
17216 netif_device_detach(dev);
17217 rc = bnxt_close(dev);
17218 }
17219 bnxt_hwrm_func_drv_unrgtr(bp);
17220 bnxt_ptp_clear(bp);
17221 pci_disable_device(bp->pdev);
17222 bnxt_free_ctx_mem(bp, false);
17223 netdev_unlock(dev);
17224 return rc;
17225 }
17226
17227 static int bnxt_resume(struct device *device)
17228 {
17229 struct net_device *dev = dev_get_drvdata(device);
17230 struct bnxt *bp = netdev_priv(dev);
17231 int rc = 0;
17232
17233 netdev_lock(dev);
17234 rc = pci_enable_device(bp->pdev);
17235 if (rc) {
17236 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17237 rc);
17238 goto resume_exit;
17239 }
17240 pci_set_master(bp->pdev);
17241 if (bnxt_hwrm_ver_get(bp)) {
17242 rc = -ENODEV;
17243 goto resume_exit;
17244 }
17245 rc = bnxt_hwrm_func_reset(bp);
17246 if (rc) {
17247 rc = -EBUSY;
17248 goto resume_exit;
17249 }
17250
17251 rc = bnxt_hwrm_func_qcaps(bp);
17252 if (rc)
17253 goto resume_exit;
17254
17255 bnxt_clear_reservations(bp, true);
17256
17257 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17258 rc = -ENODEV;
17259 goto resume_exit;
17260 }
17261 if (bp->fw_crash_mem)
17262 bnxt_hwrm_crash_dump_mem_cfg(bp);
17263
17264 if (bnxt_ptp_init(bp)) {
17265 kfree(bp->ptp_cfg);
17266 bp->ptp_cfg = NULL;
17267 }
17268 bnxt_get_wol_settings(bp);
17269 if (netif_running(dev)) {
17270 rc = bnxt_open(dev);
17271 if (!rc)
17272 netif_device_attach(dev);
17273 }
17274
17275 resume_exit:
17276 netdev_unlock(bp->dev);
17277 if (!rc) {
17278 bnxt_ulp_start(bp);
17279 bnxt_reenable_sriov(bp);
17280 }
17281 return rc;
17282 }
17283
17284 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17285 #define BNXT_PM_OPS (&bnxt_pm_ops)
17286
17287 #else
17288
17289 #define BNXT_PM_OPS NULL
17290
17291 #endif /* CONFIG_PM_SLEEP */
17292
17293 /**
17294 * bnxt_io_error_detected - called when PCI error is detected
17295 * @pdev: Pointer to PCI device
17296 * @state: The current pci connection state
17297 *
17298 * This function is called after a PCI bus error affecting
17299 * this device has been detected.
17300 */
17301 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17302 pci_channel_state_t state)
17303 {
17304 struct net_device *netdev = pci_get_drvdata(pdev);
17305 struct bnxt *bp = netdev_priv(netdev);
17306 bool abort = false;
17307
17308 netdev_info(netdev, "PCI I/O error detected\n");
17309
17310 bnxt_ulp_stop(bp);
17311
17312 netdev_lock(netdev);
17313 netif_device_detach(netdev);
17314
17315 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17316 netdev_err(bp->dev, "Firmware reset already in progress\n");
17317 abort = true;
17318 }
17319
17320 if (abort || state == pci_channel_io_perm_failure) {
17321 netdev_unlock(netdev);
17322 return PCI_ERS_RESULT_DISCONNECT;
17323 }
17324
17325 /* Link is not reliable anymore if state is pci_channel_io_frozen
17326 * so we disable bus master to prevent any potential bad DMAs before
17327 * freeing kernel memory.
17328 */
17329 if (state == pci_channel_io_frozen) {
17330 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17331 bnxt_fw_fatal_close(bp);
17332 }
17333
17334 if (netif_running(netdev))
17335 __bnxt_close_nic(bp, true, true);
17336
17337 if (pci_is_enabled(pdev))
17338 pci_disable_device(pdev);
17339 bnxt_free_ctx_mem(bp, false);
17340 netdev_unlock(netdev);
17341
17342 /* Request a slot reset. */
17343 return PCI_ERS_RESULT_NEED_RESET;
17344 }
17345
17346 /**
17347 * bnxt_io_slot_reset - called after the pci bus has been reset.
17348 * @pdev: Pointer to PCI device
17349 *
17350 * Restart the card from scratch, as if from a cold-boot.
17351 * At this point, the card has experienced a hard reset,
17352 * followed by fixups by BIOS, and has its config space
17353 * set up identically to what it was at cold boot.
17354 */
17355 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17356 {
17357 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17358 struct net_device *netdev = pci_get_drvdata(pdev);
17359 struct bnxt *bp = netdev_priv(netdev);
17360 int retry = 0;
17361 int err = 0;
17362 int off;
17363
17364 netdev_info(bp->dev, "PCI Slot Reset\n");
17365
17366 if (test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) {
17367 /* After DPC, the chip should return CRS when the vendor ID
17368 * config register is read, until it is ready. This does not
17369 * happen reliably on all chips, so add a 5-second delay as a
17370 * workaround.
17371 */
17372 msleep(5000);
17373 }
17374
17375 netdev_lock(netdev);
17376
17377 if (pci_enable_device(pdev)) {
17378 dev_err(&pdev->dev,
17379 "Cannot re-enable PCI device after reset.\n");
17380 } else {
17381 pci_set_master(pdev);
17382 /* Upon fatal error, our device internal logic that latches to
17383 * BAR value is getting reset and will restore only upon
17384 * rewriting the BARs.
17385 *
17386 * As pci_restore_state() does not re-write the BARs if the
17387 * value is same as saved value earlier, driver needs to
17388 * write the BARs to 0 to force restore, in case of fatal error.
17389 */
17390 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17391 &bp->state)) {
17392 for (off = PCI_BASE_ADDRESS_0;
17393 off <= PCI_BASE_ADDRESS_5; off += 4)
17394 pci_write_config_dword(bp->pdev, off, 0);
17395 }
17396 pci_restore_state(pdev);
17397 pci_save_state(pdev);
17398
17399 bnxt_inv_fw_health_reg(bp);
17400 bnxt_try_map_fw_health_reg(bp);
17401
17402 /* In some PCIe AER scenarios, firmware may take up to
17403 * 10 seconds to become ready in the worst case.
17404 */
17405 do {
17406 err = bnxt_try_recover_fw(bp);
17407 if (!err)
17408 break;
17409 retry++;
17410 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17411
17412 if (err) {
17413 dev_err(&pdev->dev, "Firmware not ready\n");
17414 goto reset_exit;
17415 }
17416
17417 err = bnxt_hwrm_func_reset(bp);
17418 if (!err)
17419 result = PCI_ERS_RESULT_RECOVERED;
17420
17421 /* IRQ will be initialized later in bnxt_io_resume */
17422 bnxt_ulp_irq_stop(bp);
17423 bnxt_clear_int_mode(bp);
17424 }
17425
17426 reset_exit:
17427 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17428 bnxt_clear_reservations(bp, true);
17429 netdev_unlock(netdev);
17430
17431 return result;
17432 }
17433
17434 /**
17435 * bnxt_io_resume - called when traffic can start flowing again.
17436 * @pdev: Pointer to PCI device
17437 *
17438 * This callback is called when the error recovery driver tells
17439 * us that it's OK to resume normal operation.
17440 */
17441 static void bnxt_io_resume(struct pci_dev *pdev)
17442 {
17443 struct net_device *netdev = pci_get_drvdata(pdev);
17444 struct bnxt *bp = netdev_priv(netdev);
17445 int err;
17446
17447 netdev_info(bp->dev, "PCI Slot Resume\n");
17448 netdev_lock(netdev);
17449
17450 err = bnxt_hwrm_func_qcaps(bp);
17451 if (!err) {
17452 if (netif_running(netdev)) {
17453 err = bnxt_open(netdev);
17454 } else {
17455 err = bnxt_reserve_rings(bp, true);
17456 if (!err)
17457 err = bnxt_init_int_mode(bp);
17458 }
17459 }
17460
17461 if (!err)
17462 netif_device_attach(netdev);
17463
17464 netdev_unlock(netdev);
17465 if (!err) {
17466 bnxt_ulp_start(bp);
17467 bnxt_reenable_sriov(bp);
17468 }
17469 }
17470
17471 static const struct pci_error_handlers bnxt_err_handler = {
17472 .error_detected = bnxt_io_error_detected,
17473 .slot_reset = bnxt_io_slot_reset,
17474 .resume = bnxt_io_resume
17475 };
17476
17477 static struct pci_driver bnxt_pci_driver = {
17478 .name = DRV_MODULE_NAME,
17479 .id_table = bnxt_pci_tbl,
17480 .probe = bnxt_init_one,
17481 .remove = bnxt_remove_one,
17482 .shutdown = bnxt_shutdown,
17483 .driver.pm = BNXT_PM_OPS,
17484 .err_handler = &bnxt_err_handler,
17485 #if defined(CONFIG_BNXT_SRIOV)
17486 .sriov_configure = bnxt_sriov_configure,
17487 #endif
17488 };
17489
17490 static int __init bnxt_init(void)
17491 {
17492 int err;
17493
17494 bnxt_debug_init();
17495 err = pci_register_driver(&bnxt_pci_driver);
17496 if (err) {
17497 bnxt_debug_exit();
17498 return err;
17499 }
17500
17501 return 0;
17502 }
17503
17504 static void __exit bnxt_exit(void)
17505 {
17506 pci_unregister_driver(&bnxt_pci_driver);
17507 if (bnxt_pf_wq)
17508 destroy_workqueue(bnxt_pf_wq);
17509 bnxt_debug_exit();
17510 }
17511
17512 module_init(bnxt_init);
17513 module_exit(bnxt_exit);
17514