1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62 #include <linux/bnxt/ulp.h>
63
64 #include "bnxt.h"
65 #include "bnxt_hwrm.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77 #include "bnxt_gso.h"
78 #include <net/tso.h>
79
80 #define BNXT_TX_TIMEOUT (5 * HZ)
81 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
82 NETIF_MSG_TX_ERR)
83
84 MODULE_IMPORT_NS("NETDEV_INTERNAL");
85 MODULE_LICENSE("GPL");
86 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
87
88 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
89 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
90
91 #define BNXT_TX_PUSH_THRESH 164
92
93 /* indexed by enum board_idx */
94 static const struct {
95 char *name;
96 } board_info[] = {
97 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
98 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
99 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
100 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
101 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
102 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
103 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
104 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
105 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
106 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
108 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
109 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
110 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
111 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
112 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
113 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
114 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
115 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
116 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
117 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
118 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
119 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
120 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
121 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
122 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
123 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
124 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
125 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
126 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
127 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
128 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
129 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
130 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
131 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
132 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
133 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
134 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
135 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
136 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
137 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
138 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
139 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
140 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
141 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
142 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
143 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
145 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
146 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
147 [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
148 };
149
150 static const struct pci_device_id bnxt_pci_tbl[] = {
151 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
154 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
155 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
156 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
157 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
158 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
159 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
160 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
161 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
162 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
163 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
164 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
165 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
166 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
167 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
168 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
169 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
170 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
171 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
173 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
174 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
175 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
178 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
184 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
185 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
186 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
187 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
188 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
189 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
190 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
191 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
192 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
193 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
198 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
199 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
200 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
201 #ifdef CONFIG_BNXT_SRIOV
202 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
203 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
211 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
212 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
215 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
216 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
217 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
218 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
219 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
220 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
221 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
222 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
223 { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
224 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
225 #endif
226 { 0 }
227 };
228
229 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
230
231 static const u16 bnxt_vf_req_snif[] = {
232 HWRM_FUNC_CFG,
233 HWRM_FUNC_VF_CFG,
234 HWRM_PORT_PHY_QCFG,
235 HWRM_CFA_L2_FILTER_ALLOC,
236 };
237
238 static const u16 bnxt_async_events_arr[] = {
239 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
241 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
242 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
243 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
244 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
245 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
246 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
247 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
248 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
249 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
250 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
251 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
252 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
253 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
254 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
255 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
256 };
257
258 const u16 bnxt_bstore_to_trace[] = {
259 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
260 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
261 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
262 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
263 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
264 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
265 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
266 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
267 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
268 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
269 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
270 [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
271 [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
272 };
273
274 static struct workqueue_struct *bnxt_pf_wq;
275
276 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
277 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
278 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
279
280 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
281 .ports = {
282 .src = 0,
283 .dst = 0,
284 },
285 .addrs = {
286 .v6addrs = {
287 .src = BNXT_IPV6_MASK_NONE,
288 .dst = BNXT_IPV6_MASK_NONE,
289 },
290 },
291 };
292
293 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
294 .ports = {
295 .src = cpu_to_be16(0xffff),
296 .dst = cpu_to_be16(0xffff),
297 },
298 .addrs = {
299 .v6addrs = {
300 .src = BNXT_IPV6_MASK_ALL,
301 .dst = BNXT_IPV6_MASK_ALL,
302 },
303 },
304 };
305
306 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
307 .ports = {
308 .src = cpu_to_be16(0xffff),
309 .dst = cpu_to_be16(0xffff),
310 },
311 .addrs = {
312 .v4addrs = {
313 .src = cpu_to_be32(0xffffffff),
314 .dst = cpu_to_be32(0xffffffff),
315 },
316 },
317 };
318
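/* Return true if the board index identifies a virtual function (VF) device. */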
319 static bool bnxt_vf_pciid(enum board_idx idx)
320 {
321 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
322 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
323 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
324 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
325 idx == NETXTREME_E_P7_VF_HV);
326 }
327
328 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
329 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
330
331 #define BNXT_DB_CQ(db, idx) \
332 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
333
334 #define BNXT_DB_NQ_P5(db, idx) \
335 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
336 (db)->doorbell)
337
338 #define BNXT_DB_NQ_P7(db, idx) \
339 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
340 DB_RING_IDX(db, idx), (db)->doorbell)
341
342 #define BNXT_DB_CQ_ARM(db, idx) \
343 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
344
345 #define BNXT_DB_NQ_ARM_P5(db, idx) \
346 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
347 DB_RING_IDX(db, idx), (db)->doorbell)
348
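/* Ring the NQ doorbell using the format required by the chip generation
 * (P7, P5+, or legacy completion ring doorbell).
 */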
349 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
350 {
351 if (bp->flags & BNXT_FLAG_CHIP_P7)
352 BNXT_DB_NQ_P7(db, idx);
353 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
354 BNXT_DB_NQ_P5(db, idx);
355 else
356 BNXT_DB_CQ(db, idx);
357 }
358
359 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
360 {
361 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
362 BNXT_DB_NQ_ARM_P5(db, idx);
363 else
364 BNXT_DB_CQ_ARM(db, idx);
365 }
366
367 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
368 {
369 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
370 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
371 DB_RING_IDX(db, idx), db->doorbell);
372 else
373 BNXT_DB_CQ(db, idx);
374 }
375
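/* Queue the firmware reset task, but only while a FW reset is in progress.
 * The PF uses the driver's dedicated workqueue; VFs use the system workqueue.
 */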
376 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
377 {
378 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
379 return;
380
381 if (BNXT_PF(bp))
382 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
383 else
384 schedule_delayed_work(&bp->fw_reset_task, delay);
385 }
386
387 static void __bnxt_queue_sp_work(struct bnxt *bp)
388 {
389 if (BNXT_PF(bp))
390 queue_work(bnxt_pf_wq, &bp->sp_task);
391 else
392 schedule_work(&bp->sp_task);
393 }
394
395 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
396 {
397 set_bit(event, &bp->sp_event);
398 __bnxt_queue_sp_work(bp);
399 }
400
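/* Mark the RX ring's NAPI as being in reset and schedule a full reset on
 * P5+ chips, or an RX ring reset otherwise, from the slow-path task.
 */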
401 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
402 {
403 if (!rxr->bnapi->in_reset) {
404 rxr->bnapi->in_reset = true;
405 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
406 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
407 else
408 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
409 __bnxt_queue_sp_work(bp);
410 }
411 rxr->rx_next_cons = 0xffff;
412 }
413
414 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
415 u16 curr)
416 {
417 struct bnxt_napi *bnapi = txr->bnapi;
418
419 if (bnapi->tx_fault)
420 return;
421
422 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
423 txr->txq_index, txr->tx_hw_cons,
424 txr->tx_cons, txr->tx_prod, curr);
425 WARN_ON_ONCE(1);
426 bnapi->tx_fault = 1;
427 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
428 }
429
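/* TX BD length hint flags, indexed by packet length in 512-byte units. */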
430 const u16 bnxt_lhint_arr[] = {
431 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
432 TX_BD_FLAGS_LHINT_512_TO_1023,
433 TX_BD_FLAGS_LHINT_1024_TO_2047,
434 TX_BD_FLAGS_LHINT_1024_TO_2047,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
449 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
450 };
451
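/* Return the CFA action (destination port ID) carried in the skb's
 * HW port mux metadata dst, or 0 if none is attached.
 */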
452 u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
453 {
454 struct metadata_dst *md_dst = skb_metadata_dst(skb);
455
456 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
457 return 0;
458
459 return md_dst->u.port_info.port_id;
460 }
461
462 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
463 u16 prod)
464 {
465 /* Sync BD data before updating doorbell */
466 wmb();
467 bnxt_db_write(bp, &txr->tx_db, prod);
468 txr->kick_pending = 0;
469 }
470
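/* Main transmit routine.  Packets small enough for the push threshold are
 * written inline through the push doorbell; all other packets are DMA
 * mapped and described with long TX BDs.
 */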
471 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
472 {
473 struct bnxt *bp = netdev_priv(dev);
474 struct tx_bd *txbd, *txbd0;
475 struct tx_bd_ext *txbd1;
476 struct netdev_queue *txq;
477 int i;
478 dma_addr_t mapping;
479 unsigned int length, pad = 0;
480 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
481 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
482 struct pci_dev *pdev = bp->pdev;
483 u16 prod, last_frag, txts_prod;
484 struct bnxt_tx_ring_info *txr;
485 struct bnxt_sw_tx_bd *tx_buf;
486 __le32 lflags = 0;
487 skb_frag_t *frag;
488
489 i = skb_get_queue_mapping(skb);
490 if (unlikely(i >= bp->tx_nr_rings)) {
491 dev_kfree_skb_any(skb);
492 dev_core_stats_tx_dropped_inc(dev);
493 return NETDEV_TX_OK;
494 }
495
496 txq = netdev_get_tx_queue(dev, i);
497 txr = &bp->tx_ring[bp->tx_ring_map[i]];
498 prod = txr->tx_prod;
499
500 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
501 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
502 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
503 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
504 if (skb_linearize(skb)) {
505 dev_kfree_skb_any(skb);
506 dev_core_stats_tx_dropped_inc(dev);
507 return NETDEV_TX_OK;
508 }
509 }
510 #endif
511 if (skb_is_gso(skb) &&
512 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) &&
513 !(bp->flags & BNXT_FLAG_UDP_GSO_CAP))
514 return bnxt_sw_udp_gso_xmit(bp, txr, txq, skb);
515
516 free_size = bnxt_tx_avail(bp, txr);
517 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
518 /* We must have raced with NAPI cleanup */
519 if (net_ratelimit() && txr->kick_pending)
520 netif_warn(bp, tx_err, dev,
521 "bnxt: ring busy w/ flush pending!\n");
522 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
523 bp->tx_wake_thresh))
524 return NETDEV_TX_BUSY;
525 }
526
527 length = skb->len;
528 len = skb_headlen(skb);
529 last_frag = skb_shinfo(skb)->nr_frags;
530
531 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
532
533 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
534 tx_buf->skb = skb;
535 tx_buf->nr_frags = last_frag;
536
537 vlan_tag_flags = 0;
538 cfa_action = bnxt_xmit_get_cfa_action(skb);
539 if (skb_vlan_tag_present(skb)) {
540 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
541 skb_vlan_tag_get(skb);
542 /* Currently supports 802.1Q and 802.1AD VLAN offloads.
543 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
544 */
545 if (skb->vlan_proto == htons(ETH_P_8021Q))
546 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
547 }
548
549 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
550 ptp->tx_tstamp_en) {
551 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
552 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
553 tx_buf->is_ts_pkt = 1;
554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
555 } else if (!skb_is_gso(skb)) {
556 u16 seq_id, hdr_off;
557
558 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
559 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
560 if (vlan_tag_flags)
561 hdr_off += VLAN_HLEN;
562 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
563 tx_buf->is_ts_pkt = 1;
564 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
565
566 ptp->txts_req[txts_prod].tx_seqid = seq_id;
567 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
568 tx_buf->txts_prod = txts_prod;
569 }
570 }
571 }
572 if (unlikely(skb->no_fcs))
573 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
574
575 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
576 skb_frags_readable(skb) && !lflags) {
577 struct tx_push_buffer *tx_push_buf = txr->tx_push;
578 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
579 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
580 void __iomem *db = txr->tx_db.doorbell;
581 void *pdata = tx_push_buf->data;
582 u64 *end;
583 int j, push_len;
584
585 /* Set COAL_NOW to be ready quickly for the next push */
586 tx_push->tx_bd_len_flags_type =
587 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
588 TX_BD_TYPE_LONG_TX_BD |
589 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
590 TX_BD_FLAGS_COAL_NOW |
591 TX_BD_FLAGS_PACKET_END |
592 TX_BD_CNT(2));
593
594 if (skb->ip_summed == CHECKSUM_PARTIAL)
595 tx_push1->tx_bd_hsize_lflags =
596 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
597 else
598 tx_push1->tx_bd_hsize_lflags = 0;
599
600 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
601 tx_push1->tx_bd_cfa_action =
602 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
603
604 end = pdata + length;
605 end = PTR_ALIGN(end, 8) - 1;
606 *end = 0;
607
608 skb_copy_from_linear_data(skb, pdata, len);
609 pdata += len;
610 for (j = 0; j < last_frag; j++) {
611 void *fptr;
612
613 frag = &skb_shinfo(skb)->frags[j];
614 fptr = skb_frag_address_safe(frag);
615 if (!fptr)
616 goto normal_tx;
617
618 memcpy(pdata, fptr, skb_frag_size(frag));
619 pdata += skb_frag_size(frag);
620 }
621
622 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
623 txbd->tx_bd_haddr = txr->data_mapping;
624 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
625 prod = NEXT_TX(prod);
626 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
627 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
628 memcpy(txbd, tx_push1, sizeof(*txbd));
629 prod = NEXT_TX(prod);
630 tx_push->doorbell =
631 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
632 DB_RING_IDX(&txr->tx_db, prod));
633 WRITE_ONCE(txr->tx_prod, prod);
634
635 tx_buf->is_push = 1;
636 netdev_tx_sent_queue(txq, skb->len);
637 wmb(); /* Sync is_push and byte queue before pushing data */
638
639 push_len = (length + sizeof(*tx_push) + 7) / 8;
640 if (push_len > 16) {
641 __iowrite64_copy(db, tx_push_buf, 16);
642 __iowrite32_copy(db + 4, tx_push_buf + 1,
643 (push_len - 16) << 1);
644 } else {
645 __iowrite64_copy(db, tx_push_buf, push_len);
646 }
647
648 goto tx_done;
649 }
650
651 normal_tx:
652 if (length < BNXT_MIN_PKT_SIZE) {
653 pad = BNXT_MIN_PKT_SIZE - length;
654 if (skb_pad(skb, pad))
655 /* SKB already freed. */
656 goto tx_kick_pending;
657 length = BNXT_MIN_PKT_SIZE;
658 }
659
660 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
661
662 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
663 goto tx_free;
664
665 dma_unmap_addr_set(tx_buf, mapping, mapping);
666 dma_unmap_len_set(tx_buf, len, len);
667 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
668 TX_BD_CNT(last_frag + 2);
669
670 txbd->tx_bd_haddr = cpu_to_le64(mapping);
671 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
672
673 prod = NEXT_TX(prod);
674 txbd1 = bnxt_init_ext_bd(bp, txr, prod, lflags, vlan_tag_flags,
675 cfa_action);
676
677 if (skb_is_gso(skb)) {
678 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
679 u32 hdr_len;
680
681 if (skb->encapsulation) {
682 if (udp_gso)
683 hdr_len = skb_inner_transport_offset(skb) +
684 sizeof(struct udphdr);
685 else
686 hdr_len = skb_inner_tcp_all_headers(skb);
687 } else if (udp_gso) {
688 hdr_len = skb_transport_offset(skb) +
689 sizeof(struct udphdr);
690 } else {
691 hdr_len = skb_tcp_all_headers(skb);
692 }
693
694 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
695 TX_BD_FLAGS_T_IPID |
696 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
697 length = skb_shinfo(skb)->gso_size;
698 txbd1->tx_bd_mss = cpu_to_le32(length);
699 length += hdr_len;
700 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
701 txbd1->tx_bd_hsize_lflags |=
702 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
703 }
704
705 length >>= 9;
706 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
707 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
708 skb->len);
709 i = 0;
710 goto tx_dma_error;
711 }
712 flags |= bnxt_lhint_arr[length];
713 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
714
715 txbd0 = txbd;
716 for (i = 0; i < last_frag; i++) {
717 frag = &skb_shinfo(skb)->frags[i];
718 prod = NEXT_TX(prod);
719 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
720
721 len = skb_frag_size(frag);
722 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
723 DMA_TO_DEVICE);
724
725 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
726 goto tx_dma_error;
727
728 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
729 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
730 mapping, mapping);
731 dma_unmap_len_set(tx_buf, len, len);
732
733 txbd->tx_bd_haddr = cpu_to_le64(mapping);
734
735 flags = len << TX_BD_LEN_SHIFT;
736 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
737 }
738
739 flags &= ~TX_BD_LEN;
740 txbd->tx_bd_len_flags_type =
741 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
742 TX_BD_FLAGS_PACKET_END);
743
744 netdev_tx_sent_queue(txq, skb->len);
745
746 skb_tx_timestamp(skb);
747
748 prod = NEXT_TX(prod);
749 WRITE_ONCE(txr->tx_prod, prod);
750
751 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
752 bnxt_txr_db_kick(bp, txr, prod);
753 } else {
754 if (free_size >= bp->tx_wake_thresh)
755 txbd0->tx_bd_len_flags_type |=
756 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
757 txr->kick_pending = 1;
758 }
759
760 tx_done:
761
762 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
763 if (netdev_xmit_more() && !tx_buf->is_push) {
764 txbd0->tx_bd_len_flags_type &=
765 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
766 bnxt_txr_db_kick(bp, txr, prod);
767 }
768
769 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
770 bp->tx_wake_thresh);
771 }
772 return NETDEV_TX_OK;
773
774 tx_dma_error:
775 last_frag = i;
776
777 /* start back at beginning and unmap skb */
778 prod = txr->tx_prod;
779 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
780 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
781 skb_headlen(skb), DMA_TO_DEVICE);
782 prod = NEXT_TX(prod);
783
784 /* unmap remaining mapped pages */
785 for (i = 0; i < last_frag; i++) {
786 prod = NEXT_TX(prod);
787 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
788 frag = &skb_shinfo(skb)->frags[i];
789 netmem_dma_unmap_page_attrs(&pdev->dev,
790 dma_unmap_addr(tx_buf, mapping),
791 skb_frag_size(frag),
792 DMA_TO_DEVICE, 0);
793 }
794
795 tx_free:
796 dev_kfree_skb_any(skb);
797 tx_kick_pending:
798 if (BNXT_TX_PTP_IS_SET(lflags)) {
799 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
800 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
801 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
802 /* set SKB to err so PTP worker will clean up */
803 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
804 }
805 if (txr->kick_pending)
806 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
807 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
808 dev_core_stats_tx_dropped_inc(dev);
809 return NETDEV_TX_OK;
810 }
811
812 /* Returns true if some TX packets remain unprocessed. */
813 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
814 int budget)
815 {
816 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
817 struct pci_dev *pdev = bp->pdev;
818 u16 hw_cons = txr->tx_hw_cons;
819 unsigned int tx_bytes = 0;
820 u16 cons = txr->tx_cons;
821 unsigned int dma_len;
822 dma_addr_t dma_addr;
823 int tx_pkts = 0;
824 bool rc = false;
825
826 while (RING_TX(bp, cons) != hw_cons) {
827 struct bnxt_sw_tx_bd *tx_buf, *head_buf;
828 struct sk_buff *skb;
829 bool is_ts_pkt;
830 int j, last;
831
832 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
833 head_buf = tx_buf;
834 skb = tx_buf->skb;
835
836 if (unlikely(!skb)) {
837 bnxt_sched_reset_txr(bp, txr, cons);
838 return rc;
839 }
840
841 is_ts_pkt = tx_buf->is_ts_pkt;
842 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
843 rc = true;
844 break;
845 }
846
847 cons = NEXT_TX(cons);
848 tx_pkts++;
849 tx_bytes += skb->len;
850 tx_buf->skb = NULL;
851 tx_buf->is_ts_pkt = 0;
852
853 if (tx_buf->is_push) {
854 tx_buf->is_push = 0;
855 goto next_tx_int;
856 }
857
858 if (dma_unmap_len(tx_buf, len)) {
859 dma_addr = dma_unmap_addr(tx_buf, mapping);
860 dma_len = dma_unmap_len(tx_buf, len);
861
862 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
863 DMA_TO_DEVICE);
864 }
865
866 last = tx_buf->nr_frags;
867
868 for (j = 0; j < last; j++) {
869 cons = NEXT_TX(cons);
870 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
871 if (dma_unmap_len(tx_buf, len)) {
872 dma_addr = dma_unmap_addr(tx_buf, mapping);
873 dma_len = dma_unmap_len(tx_buf, len);
874
875 netmem_dma_unmap_page_attrs(&pdev->dev,
876 dma_addr, dma_len,
877 DMA_TO_DEVICE, 0);
878 }
879 }
880
881 if (unlikely(head_buf->is_sw_gso)) {
882 u16 inline_cons = txr->tx_inline_cons + 1;
883
884 WRITE_ONCE(txr->tx_inline_cons, inline_cons);
885 if (head_buf->is_sw_gso == BNXT_SW_GSO_LAST) {
886 tso_dma_map_complete(&pdev->dev,
887 &head_buf->sw_gso_cstate);
888 } else {
889 tx_pkts--;
890 tx_bytes -= skb->len;
891 skb = NULL;
892 }
893 head_buf->is_sw_gso = 0;
894 }
895
896 if (unlikely(is_ts_pkt)) {
897 if (BNXT_CHIP_P5(bp)) {
898 /* PTP worker takes ownership of the skb */
899 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
900 skb = NULL;
901 }
902 }
903
904 next_tx_int:
905 cons = NEXT_TX(cons);
906
907 napi_consume_skb(skb, budget);
908 }
909
910 WRITE_ONCE(txr->tx_cons, cons);
911
912 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
913 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
914 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
915
916 return rc;
917 }
918
919 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
920 {
921 struct bnxt_tx_ring_info *txr;
922 bool more = false;
923 int i;
924
925 bnxt_for_each_napi_tx(i, bnapi, txr) {
926 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
927 more |= __bnxt_tx_int(bp, txr, budget);
928 }
929 if (!more)
930 bnapi->events &= ~BNXT_TX_CMP_EVENT;
931 }
932
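/* Return true if RX header buffers must come from a separate head page pool. */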
933 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
934 {
935 return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
936 }
937
938 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
939 struct bnxt_rx_ring_info *rxr,
940 unsigned int *offset,
941 gfp_t gfp)
942 {
943 struct page *page;
944
945 if (rxr->rx_page_size < PAGE_SIZE) {
946 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
947 rxr->rx_page_size);
948 } else {
949 page = page_pool_dev_alloc_pages(rxr->page_pool);
950 *offset = 0;
951 }
952 if (!page)
953 return NULL;
954
955 *mapping = page_pool_get_dma_addr(page) + *offset;
956 return page;
957 }
958
959 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
960 struct bnxt_rx_ring_info *rxr,
961 unsigned int *offset,
962 gfp_t gfp)
963 {
964 netmem_ref netmem;
965
966 if (rxr->rx_page_size < PAGE_SIZE) {
967 netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
968 rxr->rx_page_size, gfp);
969 } else {
970 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
971 *offset = 0;
972 }
973 if (!netmem)
974 return 0;
975
976 *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
977 return netmem;
978 }
979
980 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
981 struct bnxt_rx_ring_info *rxr,
982 gfp_t gfp)
983 {
984 unsigned int offset;
985 struct page *page;
986
987 page = page_pool_alloc_frag(rxr->head_pool, &offset,
988 bp->rx_buf_size, gfp);
989 if (!page)
990 return NULL;
991
992 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
993 return page_address(page) + offset;
994 }
995
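/* Allocate an RX buffer (a page in page mode, otherwise a frag from the
 * head pool) and program its DMA address into the RX BD at @prod.
 */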
996 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
997 u16 prod, gfp_t gfp)
998 {
999 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1000 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1001 dma_addr_t mapping;
1002
1003 if (BNXT_RX_PAGE_MODE(bp)) {
1004 unsigned int offset;
1005 struct page *page =
1006 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
1007
1008 if (!page)
1009 return -ENOMEM;
1010
1011 mapping += bp->rx_dma_offset;
1012 rx_buf->data = page;
1013 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
1014 } else {
1015 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
1016
1017 if (!data)
1018 return -ENOMEM;
1019
1020 rx_buf->data = data;
1021 rx_buf->data_ptr = data + bp->rx_offset;
1022 }
1023 rx_buf->mapping = mapping;
1024
1025 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1026 return 0;
1027 }
1028
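/* Recycle the RX buffer at @cons into the current producer slot. */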
1029 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1030 {
1031 u16 prod = rxr->rx_prod;
1032 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1033 struct bnxt *bp = rxr->bnapi->bp;
1034 struct rx_bd *cons_bd, *prod_bd;
1035
1036 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1037 cons_rx_buf = &rxr->rx_buf_ring[cons];
1038
1039 prod_rx_buf->data = data;
1040 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1041
1042 prod_rx_buf->mapping = cons_rx_buf->mapping;
1043
1044 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1045 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1046
1047 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1048 }
1049
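/* Find the next free slot in the RX aggregation bitmap, wrapping around if needed. */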
1050 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1051 {
1052 u16 next, max = rxr->rx_agg_bmap_size;
1053
1054 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1055 if (next >= max)
1056 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1057 return next;
1058 }
1059
1060 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1061 u16 prod, gfp_t gfp)
1062 {
1063 struct rx_bd *rxbd =
1064 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1065 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1066 u16 sw_prod = rxr->rx_sw_agg_prod;
1067 unsigned int offset = 0;
1068 dma_addr_t mapping;
1069 netmem_ref netmem;
1070
1071 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1072 if (!netmem)
1073 return -ENOMEM;
1074
1075 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1076 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1077
1078 __set_bit(sw_prod, rxr->rx_agg_bmap);
1079 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1080 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1081
1082 rx_agg_buf->netmem = netmem;
1083 rx_agg_buf->offset = offset;
1084 rx_agg_buf->mapping = mapping;
1085 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1086 rxbd->rx_bd_opaque = sw_prod;
1087 return 0;
1088 }
1089
1090 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1091 struct bnxt_cp_ring_info *cpr,
1092 u16 cp_cons, u16 curr)
1093 {
1094 struct rx_agg_cmp *agg;
1095
1096 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1097 agg = (struct rx_agg_cmp *)
1098 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1099 return agg;
1100 }
1101
1102 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1103 struct bnxt_rx_ring_info *rxr,
1104 u16 agg_id, u16 curr)
1105 {
1106 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1107
1108 return &tpa_info->agg_arr[curr];
1109 }
1110
1111 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1112 u16 start, u32 agg_bufs, bool tpa)
1113 {
1114 struct bnxt_napi *bnapi = cpr->bnapi;
1115 struct bnxt *bp = bnapi->bp;
1116 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1117 u16 prod = rxr->rx_agg_prod;
1118 u16 sw_prod = rxr->rx_sw_agg_prod;
1119 bool p5_tpa = false;
1120 u32 i;
1121
1122 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1123 p5_tpa = true;
1124
1125 for (i = 0; i < agg_bufs; i++) {
1126 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1127 struct rx_agg_cmp *agg;
1128 struct rx_bd *prod_bd;
1129 netmem_ref netmem;
1130 u16 cons;
1131
1132 if (p5_tpa)
1133 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1134 else
1135 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1136 cons = agg->rx_agg_cmp_opaque;
1137 __clear_bit(cons, rxr->rx_agg_bmap);
1138
1139 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1140 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1141
1142 __set_bit(sw_prod, rxr->rx_agg_bmap);
1143 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1144 cons_rx_buf = &rxr->rx_agg_ring[cons];
1145
1146 /* It is possible for sw_prod to be equal to cons, so
1147 * set cons_rx_buf->netmem to 0 first.
1148 */
1149 netmem = cons_rx_buf->netmem;
1150 cons_rx_buf->netmem = 0;
1151 prod_rx_buf->netmem = netmem;
1152 prod_rx_buf->offset = cons_rx_buf->offset;
1153
1154 prod_rx_buf->mapping = cons_rx_buf->mapping;
1155
1156 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1157
1158 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1159 prod_bd->rx_bd_opaque = sw_prod;
1160
1161 prod = NEXT_RX_AGG(prod);
1162 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1163 }
1164 rxr->rx_agg_prod = prod;
1165 rxr->rx_sw_agg_prod = sw_prod;
1166 }
1167
1168 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1169 struct bnxt_rx_ring_info *rxr,
1170 u16 cons, void *data, u8 *data_ptr,
1171 dma_addr_t dma_addr,
1172 unsigned int offset_and_len)
1173 {
1174 unsigned int len = offset_and_len & 0xffff;
1175 struct page *page = data;
1176 u16 prod = rxr->rx_prod;
1177 struct sk_buff *skb;
1178 int err;
1179
1180 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1181 if (unlikely(err)) {
1182 bnxt_reuse_rx_data(rxr, cons, data);
1183 return NULL;
1184 }
1185 dma_addr -= bp->rx_dma_offset;
1186 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1187 bp->rx_dir);
1188 skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1189 if (!skb) {
1190 page_pool_recycle_direct(rxr->page_pool, page);
1191 return NULL;
1192 }
1193 skb_mark_for_recycle(skb);
1194 skb_reserve(skb, bp->rx_offset);
1195 __skb_put(skb, len);
1196
1197 return skb;
1198 }
1199
1200 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1201 struct bnxt_rx_ring_info *rxr,
1202 u16 cons, void *data, u8 *data_ptr,
1203 dma_addr_t dma_addr,
1204 unsigned int offset_and_len)
1205 {
1206 unsigned int payload = offset_and_len >> 16;
1207 unsigned int len = offset_and_len & 0xffff;
1208 skb_frag_t *frag;
1209 struct page *page = data;
1210 u16 prod = rxr->rx_prod;
1211 struct sk_buff *skb;
1212 int off, err;
1213
1214 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1215 if (unlikely(err)) {
1216 bnxt_reuse_rx_data(rxr, cons, data);
1217 return NULL;
1218 }
1219 dma_addr -= bp->rx_dma_offset;
1220 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1221 bp->rx_dir);
1222
1223 if (unlikely(!payload))
1224 payload = eth_get_headlen(bp->dev, data_ptr, len);
1225
1226 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1227 if (!skb) {
1228 page_pool_recycle_direct(rxr->page_pool, page);
1229 return NULL;
1230 }
1231
1232 skb_mark_for_recycle(skb);
1233 off = (void *)data_ptr - page_address(page);
1234 skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1235 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1236 payload + NET_IP_ALIGN);
1237
1238 frag = &skb_shinfo(skb)->frags[0];
1239 skb_frag_size_sub(frag, payload);
1240 skb_frag_off_add(frag, payload);
1241 skb->data_len -= payload;
1242 skb->tail += payload;
1243
1244 return skb;
1245 }
1246
1247 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1248 struct bnxt_rx_ring_info *rxr, u16 cons,
1249 void *data, u8 *data_ptr,
1250 dma_addr_t dma_addr,
1251 unsigned int offset_and_len)
1252 {
1253 u16 prod = rxr->rx_prod;
1254 struct sk_buff *skb;
1255 int err;
1256
1257 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1258 if (unlikely(err)) {
1259 bnxt_reuse_rx_data(rxr, cons, data);
1260 return NULL;
1261 }
1262
1263 skb = napi_build_skb(data, bp->rx_buf_size);
1264 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1265 bp->rx_dir);
1266 if (!skb) {
1267 page_pool_free_va(rxr->head_pool, data, true);
1268 return NULL;
1269 }
1270
1271 skb_mark_for_recycle(skb);
1272 skb_reserve(skb, bp->rx_offset);
1273 skb_put(skb, offset_and_len & 0xffff);
1274 return skb;
1275 }
1276
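/* Attach the aggregation buffers of a completion to @skb or @xdp as frags
 * and replenish the aggregation ring.  Returns the total frag length, or 0
 * if replenishment fails and the buffers are reused instead.
 */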
1277 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1278 struct bnxt_cp_ring_info *cpr,
1279 u16 idx, u32 agg_bufs, bool tpa,
1280 struct sk_buff *skb,
1281 struct xdp_buff *xdp)
1282 {
1283 struct bnxt_napi *bnapi = cpr->bnapi;
1284 struct skb_shared_info *shinfo;
1285 struct bnxt_rx_ring_info *rxr;
1286 u32 i, total_frag_len = 0;
1287 bool p5_tpa = false;
1288 u16 prod;
1289
1290 rxr = bnapi->rx_ring;
1291 prod = rxr->rx_agg_prod;
1292
1293 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1294 p5_tpa = true;
1295
1296 if (skb)
1297 shinfo = skb_shinfo(skb);
1298 else
1299 shinfo = xdp_get_shared_info_from_buff(xdp);
1300
1301 for (i = 0; i < agg_bufs; i++) {
1302 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1303 struct rx_agg_cmp *agg;
1304 u16 cons, frag_len;
1305 netmem_ref netmem;
1306
1307 if (p5_tpa)
1308 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1309 else
1310 agg = bnxt_get_agg(bp, cpr, idx, i);
1311 cons = agg->rx_agg_cmp_opaque;
1312 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1313 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1314
1315 cons_rx_buf = &rxr->rx_agg_ring[cons];
1316 if (skb) {
1317 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1318 cons_rx_buf->offset,
1319 frag_len, rxr->rx_page_size);
1320 } else {
1321 skb_frag_t *frag = &shinfo->frags[i];
1322
1323 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1324 cons_rx_buf->offset,
1325 frag_len);
1326 shinfo->nr_frags = i + 1;
1327 }
1328 __clear_bit(cons, rxr->rx_agg_bmap);
1329
1330 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1331 * a sw_prod index that equals the cons index, so we
1332 * need to clear the cons entry now.
1333 */
1334 netmem = cons_rx_buf->netmem;
1335 cons_rx_buf->netmem = 0;
1336
1337 if (xdp && netmem_is_pfmemalloc(netmem))
1338 xdp_buff_set_frag_pfmemalloc(xdp);
1339
1340 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1341 if (skb) {
1342 skb->len -= frag_len;
1343 skb->data_len -= frag_len;
1344 skb->truesize -= rxr->rx_page_size;
1345 }
1346
1347 --shinfo->nr_frags;
1348 cons_rx_buf->netmem = netmem;
1349
1350 /* Update prod since possibly some netmems have been
1351 * allocated already.
1352 */
1353 rxr->rx_agg_prod = prod;
1354 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1355 return 0;
1356 }
1357
1358 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1359 rxr->rx_page_size);
1360
1361 total_frag_len += frag_len;
1362 prod = NEXT_RX_AGG(prod);
1363 }
1364 rxr->rx_agg_prod = prod;
1365 return total_frag_len;
1366 }
1367
1368 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1369 struct bnxt_cp_ring_info *cpr,
1370 struct sk_buff *skb, u16 idx,
1371 u32 agg_bufs, bool tpa)
1372 {
1373 u32 total_frag_len = 0;
1374
1375 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1376 skb, NULL);
1377 if (!total_frag_len) {
1378 skb_mark_for_recycle(skb);
1379 dev_kfree_skb(skb);
1380 return NULL;
1381 }
1382
1383 return skb;
1384 }
1385
1386 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1387 struct bnxt_cp_ring_info *cpr,
1388 struct xdp_buff *xdp, u16 idx,
1389 u32 agg_bufs, bool tpa)
1390 {
1391 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1392 u32 total_frag_len = 0;
1393
1394 if (!xdp_buff_has_frags(xdp))
1395 shinfo->nr_frags = 0;
1396
1397 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1398 NULL, xdp);
1399 if (total_frag_len) {
1400 xdp_buff_set_frags_flag(xdp);
1401 shinfo->nr_frags = agg_bufs;
1402 shinfo->xdp_frags_size = total_frag_len;
1403 }
1404 return total_frag_len;
1405 }
1406
1407 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1408 u8 agg_bufs, u32 *raw_cons)
1409 {
1410 u16 last;
1411 struct rx_agg_cmp *agg;
1412
1413 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1414 last = RING_CMP(*raw_cons);
1415 agg = (struct rx_agg_cmp *)
1416 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1417 return RX_AGG_CMP_VALID(agg, *raw_cons);
1418 }
1419
1420 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1421 unsigned int len,
1422 dma_addr_t mapping)
1423 {
1424 struct bnxt *bp = bnapi->bp;
1425 struct pci_dev *pdev = bp->pdev;
1426 struct sk_buff *skb;
1427
1428 skb = napi_alloc_skb(&bnapi->napi, len);
1429 if (!skb)
1430 return NULL;
1431
1432 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1433 bp->rx_dir);
1434
1435 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1436 len + NET_IP_ALIGN);
1437
1438 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1439 bp->rx_dir);
1440
1441 skb_put(skb, len);
1442
1443 return skb;
1444 }
1445
1446 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1447 unsigned int len,
1448 dma_addr_t mapping)
1449 {
1450 return bnxt_copy_data(bnapi, data, len, mapping);
1451 }
1452
1453 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1454 struct xdp_buff *xdp,
1455 unsigned int len,
1456 dma_addr_t mapping)
1457 {
1458 unsigned int metasize = 0;
1459 u8 *data = xdp->data;
1460 struct sk_buff *skb;
1461
1462 len = xdp->data_end - xdp->data_meta;
1463 metasize = xdp->data - xdp->data_meta;
1464 data = xdp->data_meta;
1465
1466 skb = bnxt_copy_data(bnapi, data, len, mapping);
1467 if (!skb)
1468 return skb;
1469
1470 if (metasize) {
1471 skb_metadata_set(skb, metasize);
1472 __skb_pull(skb, metasize);
1473 }
1474
1475 return skb;
1476 }
1477
1478 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1479 u32 *raw_cons, void *cmp)
1480 {
1481 struct rx_cmp *rxcmp = cmp;
1482 u32 tmp_raw_cons = *raw_cons;
1483 u8 cmp_type, agg_bufs = 0;
1484
1485 cmp_type = RX_CMP_TYPE(rxcmp);
1486
1487 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1488 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1489 RX_CMP_AGG_BUFS) >>
1490 RX_CMP_AGG_BUFS_SHIFT;
1491 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1492 struct rx_tpa_end_cmp *tpa_end = cmp;
1493
1494 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1495 return 0;
1496
1497 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1498 }
1499
1500 if (agg_bufs) {
1501 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1502 return -EBUSY;
1503 }
1504 *raw_cons = tmp_raw_cons;
1505 return 0;
1506 }
1507
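/* Map a hardware TPA aggregation ID to a free software index (P5+ chips). */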
1508 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1509 {
1510 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1511 u16 idx = agg_id & MAX_TPA_P5_MASK;
1512
1513 if (test_bit(idx, map->agg_idx_bmap)) {
1514 idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1515 if (idx >= MAX_TPA_P5)
1516 return INVALID_HW_RING_ID;
1517 }
1518 __set_bit(idx, map->agg_idx_bmap);
1519 map->agg_id_tbl[agg_id] = idx;
1520 return idx;
1521 }
1522
1523 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1524 {
1525 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1526
1527 __clear_bit(idx, map->agg_idx_bmap);
1528 }
1529
1530 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1531 {
1532 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1533
1534 return map->agg_id_tbl[agg_id];
1535 }
1536
1537 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1538 struct rx_tpa_start_cmp *tpa_start,
1539 struct rx_tpa_start_cmp_ext *tpa_start1)
1540 {
1541 tpa_info->cfa_code_valid = 1;
1542 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1543 tpa_info->vlan_valid = 0;
1544 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1545 tpa_info->vlan_valid = 1;
1546 tpa_info->metadata =
1547 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1548 }
1549 }
1550
1551 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1552 struct rx_tpa_start_cmp *tpa_start,
1553 struct rx_tpa_start_cmp_ext *tpa_start1)
1554 {
1555 tpa_info->vlan_valid = 0;
1556 if (TPA_START_VLAN_VALID(tpa_start)) {
1557 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1558 u32 vlan_proto = ETH_P_8021Q;
1559
1560 tpa_info->vlan_valid = 1;
1561 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1562 vlan_proto = ETH_P_8021AD;
1563 tpa_info->metadata = vlan_proto << 16 |
1564 TPA_START_METADATA0_TCI(tpa_start1);
1565 }
1566 }
1567
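/* Handle a TPA_START completion: save the starting buffer and metadata in
 * the TPA info entry for this aggregation ID and repost RX buffers.
 */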
1568 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1569 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1570 struct rx_tpa_start_cmp_ext *tpa_start1)
1571 {
1572 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1573 struct bnxt_tpa_info *tpa_info;
1574 u16 cons, prod, agg_id;
1575 struct rx_bd *prod_bd;
1576 dma_addr_t mapping;
1577
1578 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1579 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1580 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1581 if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1582 netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1583 rxr->bnapi->index,
1584 TPA_START_AGG_ID_P5(tpa_start));
1585 bnxt_sched_reset_rxr(bp, rxr);
1586 return;
1587 }
1588 } else {
1589 agg_id = TPA_START_AGG_ID(tpa_start);
1590 }
1591 cons = tpa_start->rx_tpa_start_cmp_opaque;
1592 prod = rxr->rx_prod;
1593 cons_rx_buf = &rxr->rx_buf_ring[cons];
1594 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1595 tpa_info = &rxr->rx_tpa[agg_id];
1596
1597 if (unlikely(cons != rxr->rx_next_cons ||
1598 TPA_START_ERROR(tpa_start))) {
1599 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1600 cons, rxr->rx_next_cons,
1601 TPA_START_ERROR_CODE(tpa_start1));
1602 bnxt_sched_reset_rxr(bp, rxr);
1603 return;
1604 }
1605 prod_rx_buf->data = tpa_info->data;
1606 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1607
1608 mapping = tpa_info->mapping;
1609 prod_rx_buf->mapping = mapping;
1610
1611 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1612
1613 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1614
1615 tpa_info->data = cons_rx_buf->data;
1616 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1617 cons_rx_buf->data = NULL;
1618 tpa_info->mapping = cons_rx_buf->mapping;
1619
1620 tpa_info->len =
1621 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1622 RX_TPA_START_CMP_LEN_SHIFT;
1623 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1624 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1625 tpa_info->gso_type = SKB_GSO_TCPV4;
1626 if (TPA_START_IS_IPV6(tpa_start1))
1627 tpa_info->gso_type = SKB_GSO_TCPV6;
1628 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1629 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1630 TPA_START_HASH_TYPE(tpa_start) == 3)
1631 tpa_info->gso_type = SKB_GSO_TCPV6;
1632 tpa_info->rss_hash =
1633 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1634 } else {
1635 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1636 tpa_info->gso_type = 0;
1637 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1638 }
1639 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1640 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1641 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1642 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1643 else
1644 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1645 tpa_info->agg_count = 0;
1646
1647 rxr->rx_prod = NEXT_RX(prod);
1648 cons = RING_RX(bp, NEXT_RX(cons));
1649 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1650 cons_rx_buf = &rxr->rx_buf_ring[cons];
1651
1652 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1653 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1654 cons_rx_buf->data = NULL;
1655 }
1656
bnxt_abort_tpa(struct bnxt_cp_ring_info * cpr,u16 idx,u32 agg_bufs)1657 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1658 {
1659 if (agg_bufs)
1660 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1661 }
1662
1663 #ifdef CONFIG_INET
bnxt_gro_tunnel(struct sk_buff * skb,__be16 ip_proto)1664 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1665 {
1666 struct udphdr *uh = NULL;
1667
1668 if (ip_proto == htons(ETH_P_IP)) {
1669 struct iphdr *iph = (struct iphdr *)skb->data;
1670
1671 if (iph->protocol == IPPROTO_UDP)
1672 uh = (struct udphdr *)(iph + 1);
1673 } else {
1674 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1675
1676 if (iph->nexthdr == IPPROTO_UDP)
1677 uh = (struct udphdr *)(iph + 1);
1678 }
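	/* A zero outer UDP checksum is legal for tunnels such as VXLAN, so
	 * only ask the stack for checksum conversion
	 * (SKB_GSO_UDP_TUNNEL_CSUM) when the sender actually filled in
	 * uh->check.
	 */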
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6. The hdr_info offsets are
		 * relative to the start of the outer Ethernet header, while
		 * skb->data already points past it, so the EtherType
		 * preceding the inner IP header sits at
		 * inner_ip_off - ETH_HLEN - 2. If we don't see the correct
		 * protocol ID there, it must be a loopback packet where the
		 * offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

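/* On these chips the completion carries only the TCP payload offset, so the
 * header offsets are derived arithmetically. Worked example (no tunnel, no
 * TCP timestamps): a plain IPv4 frame has payload_off = ETH_HLEN + 20 + 20 =
 * 54, giving nw_off = 54 - BNXT_IPV4_HDR_SIZE - 0 - ETH_HLEN = 0, i.e. the
 * network header starts right at skb->data. A nonzero nw_off therefore
 * implies encapsulation, which is why the tunnel fixup at the end of the
 * function keys off it.
 */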
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb,
					   struct bnxt_rx_sw_stats *rx_stats)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	rx_stats->rx_hw_gro_packets++;
	rx_stats->rx_hw_gro_wire_packets += segs;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, it must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copybreak) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats->rx.rx_oom_discards += 1;
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
						GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats->rx.rx_oom_discards += 1;
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = napi_build_skb(data, bp->rx_buf_size);
		dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
					bp->rx_buf_use_size, bp->rx_dir);

		if (!skb) {
			page_pool_free_va(rxr->head_pool, data, true);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats->rx.rx_oom_discards += 1;
			return NULL;
		}
		skb_mark_for_recycle(skb);
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
					      true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			cpr->sw_stats->rx.rx_oom_discards += 1;
			return NULL;
		}
	}

	if (tpa_info->cfa_code_valid)
		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
	skb->protocol = eth_type_trans(skb, dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->vlan_valid &&
	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		__be16 vlan_proto = htons(tpa_info->metadata >>
					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		if (eth_type_vlan(vlan_proto)) {
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		} else {
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	skb_checksum_none_assert(skb);
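	/* RX_CMP_FLAGS2_T_L4_CS_CALC (assumed here to be bit 3) indicates
	 * the tunnel L4 checksum was also verified, so the shift below
	 * yields csum_level 1 for encapsulated packets and 0 otherwise.
	 */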
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
				   &cpr->sw_stats->rx);

	return skb;
}

static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	skb_mark_for_recycle(skb);

	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
{
	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);

	if (BNXT_PTP_RX_TS_VALID(flags))
		goto ts_valid;
	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
		return false;

ts_valid:
	*cmpl_ts = ts;
	return true;
}

static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
				    struct rx_cmp *rxcmp,
				    struct rx_cmp_ext *rxcmp1)
{
	__be16 vlan_proto;
	u16 vtag;

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		__le32 flags2 = rxcmp1->rx_cmp_flags2;
		u32 meta_data;

		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
			return skb;

		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
		if (eth_type_vlan(vlan_proto))
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		else
			goto vlan_err;
	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		if (RX_CMP_VLAN_VALID(rxcmp)) {
			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);

			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
				vlan_proto = htons(ETH_P_8021Q);
			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
				vlan_proto = htons(ETH_P_8021AD);
			else
				goto vlan_err;
			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		}
	}
	return skb;
vlan_err:
	skb_mark_for_recycle(skb);
	dev_kfree_skb(skb);
	return NULL;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct skb_shared_info *sinfo;
	struct bnxt_xdp_buff bnxt_xdp;
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	bool xdp_active = false;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 flags, misc;
	u32 cmpl_ts;
	void *data;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
		bnxt_tpa_start(bp, rxr, cmp_type,
			       (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);

		/* 0xffff is forced error, don't print it */
		if (rxr->rx_next_cons != 0xffff)
			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
				    cons, rxr->rx_next_cons);
		bnxt_sched_reset_rxr(bp, rxr);
		if (rc1)
			return rc1;
		goto next_rx_no_prod_no_len;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
				netdev_warn_once(bp->dev, "RX buffer error %x\n",
						 rx_err);
				bnxt_sched_reset_rxr(bp, rxr);
			}
		}
		goto next_rx_no_len;
	}

	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
	len = flags >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_xdp_attached(bp, rxr)) {
		bnxt_xdp.rxcmp = rxcmp;
		bnxt_xdp.rxcmp1 = rxcmp1;
		bnxt_xdp.cmp_type = cmp_type;

		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &bnxt_xdp.xdp);
		if (agg_bufs) {
			u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr,
							       &bnxt_xdp.xdp,
							       cp_cons,
							       agg_bufs,
							       false);
			if (!frag_len)
				goto oom_next_rx;

		}
		xdp_active = true;
	}

	if (xdp_active) {
		if (bnxt_rx_xdp(bp, rxr, cons, &bnxt_xdp.xdp, data, &data_ptr,
				&len, event)) {
			rc = 1;
			goto next_rx;
		}
		if (xdp_buff_has_frags(&bnxt_xdp.xdp)) {
			sinfo = xdp_get_shared_info_from_buff(&bnxt_xdp.xdp);
			agg_bufs = sinfo->nr_frags;
		} else {
			agg_bufs = 0;
		}
	}

	if (len <= bp->rx_copybreak) {
		if (!xdp_active)
			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		else
			skb = bnxt_copy_xdp(bnapi, &bnxt_xdp.xdp, len,
					    dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs) {
				if (!xdp_active)
					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
							       agg_bufs, false);
				else
					bnxt_xdp_buff_frags_free(rxr,
								 &bnxt_xdp.xdp);
			}
			goto oom_next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb)
			goto oom_next_rx;
	}

	if (agg_bufs) {
		if (!xdp_active) {
			skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
						      agg_bufs, false);
			if (!skb)
				goto oom_next_rx;
		} else {
			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
						 rxr, &bnxt_xdp.xdp);
			if (!skb) {
				/* we should be able to free the old skb here */
				bnxt_xdp_buff_frags_free(rxr, &bnxt_xdp.xdp);
				goto oom_next_rx;
			}
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		enum pkt_hash_types type;

		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
			type = bnxt_rss_ext_op(bp, rxcmp);
		} else {
			u32 itypes = RX_CMP_ITYPES(rxcmp);

			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
				type = PKT_HASH_TYPE_L4;
			else
				type = PKT_HASH_TYPE_L3;
		}
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	if (cmp_type == CMP_TYPE_RX_L2_CMP)
		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
	skb->protocol = eth_type_trans(skb, dev);

	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
		if (!skb)
			goto next_rx;
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
		}
	}

	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			u64 ns, ts;

			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

				ns = bnxt_timecounter_cyc2time(ptp, ts);
				memset(skb_hwtstamps(skb), 0,
				       sizeof(*skb_hwtstamps(skb)));
				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
			}
		}
	}
	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;

oom_next_rx:
	cpr->sw_stats->rx.rx_oom_discards += 1;
	rc = -ENOMEM;
	goto next_rx;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;
	int rc;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
	if (rc && rc != -EBUSY)
		cpr->sw_stats->rx.rx_netpoll_discards += 1;
	return rc;
}

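/* Each fw_health->regs[] entry encodes both the register's address space and
 * its offset in a single u32; the BNXT_FW_HEALTH_REG_TYPE() and
 * BNXT_FW_HEALTH_REG_OFF() macros (declared in the bnxt headers) split them
 * apart so one helper can read config space, GRC or BAR-mapped registers
 * alike.
 */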
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl(bp->bar0 + reg_off);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl(bp->bar1 + reg_off);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}

static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
{
	int i;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		u16 grp_idx = bp->rx_ring[i].bnapi->index;
		struct bnxt_ring_grp_info *grp_info;

		grp_info = &bp->grp_info[grp_idx];
		if (grp_info->agg_fw_ring_id == ring_id)
			return grp_idx;
	}
	return INVALID_HW_RING_ID;
}

static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
		return link_info->force_link_speed2;
	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
		return link_info->force_pam4_link_speed;
	return link_info->force_link_speed;
}

static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		link_info->req_link_speed = link_info->force_link_speed2;
		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
		switch (link_info->req_link_speed) {
		case BNXT_LINK_SPEED_50GB_PAM4:
		case BNXT_LINK_SPEED_100GB_PAM4:
		case BNXT_LINK_SPEED_200GB_PAM4:
		case BNXT_LINK_SPEED_400GB_PAM4:
			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
			break;
		case BNXT_LINK_SPEED_100GB_PAM4_112:
		case BNXT_LINK_SPEED_200GB_PAM4_112:
		case BNXT_LINK_SPEED_400GB_PAM4_112:
			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
			break;
		default:
			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
		}
		return;
	}
	link_info->req_link_speed = link_info->force_link_speed;
	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
	if (link_info->force_pam4_link_speed) {
		link_info->req_link_speed = link_info->force_pam4_link_speed;
		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
	}
}

static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		link_info->advertising = link_info->auto_link_speeds2;
		return;
	}
	link_info->advertising = link_info->auto_link_speeds;
	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
}

static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		if (link_info->req_link_speed != link_info->force_link_speed2)
			return true;
		return false;
	}
	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
	    link_info->req_link_speed != link_info->force_link_speed)
		return true;
	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
	    link_info->req_link_speed != link_info->force_pam4_link_speed)
		return true;
	return false;
}

static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		if (link_info->advertising != link_info->auto_link_speeds2)
			return true;
		return false;
	}
	if (link_info->advertising != link_info->auto_link_speeds ||
	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
		return true;
	return false;
}

bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
{
	u32 flags = bp->ctx->ctx_arr[type].flags;

	return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
	       ((flags & BNXT_CTX_MEM_FW_TRACE) ||
		(flags & BNXT_CTX_MEM_FW_BIN_TRACE));
}

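/* Plant a known magic byte at the very last byte of the trace buffer (the
 * last used byte of the final page, with rem_bytes accounting for a
 * partially filled last page). bnxt_bs_trace_check_wrap(), driven by the
 * DBG_BUF_PRODUCER async event below, can then tell whether firmware has
 * written past the end and wrapped the ring: once the magic byte has been
 * overwritten, the buffer has presumably wrapped at least once.
 */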
static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
{
	u32 mem_size, pages, rem_bytes, magic_byte_offset;
	u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
	struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
	struct bnxt_bs_trace_info *bs_trace;
	int last_pg;

	if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
		return;

	mem_size = ctxm->max_entries * ctxm->entry_size;
	rem_bytes = mem_size % BNXT_PAGE_SIZE;
	pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);

	last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
	magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;

	rmem = &ctx_pg[0].ring_mem;
	bs_trace = &bp->bs_trace[trace_type];
	bs_trace->ctx_type = ctxm->type;
	bs_trace->trace_type = trace_type;
	if (pages > MAX_CTX_PAGES) {
		int last_pg_dir = rmem->nr_pages - 1;

		rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
		bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
	} else {
		bs_trace->magic_byte = rmem->pg_arr[last_pg];
	}
	bs_trace->magic_byte += magic_byte_offset;
	*bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
}

#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1)				\
	(((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)

#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2)				\
	(((data2) &							\
	  ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)

#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
	((data2) &							\
	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)

#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
	(((data2) &							\
	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)

#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
	((data1) &							\
	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)

#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
	(((data1) &							\
	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)

/* Return true if the workqueue has to be scheduled */
static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		netdev_warn(bp->dev, "Pause Storm detected!\n");
		break;
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
		break;
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
		char *threshold_type;
		bool notify = false;
		char *dir_str;

		switch (type) {
		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
			threshold_type = "warning";
			break;
		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
			threshold_type = "critical";
			break;
		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
			threshold_type = "fatal";
			break;
		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
			threshold_type = "shutdown";
			break;
		default:
			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
			return false;
		}
		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
			dir_str = "above";
			notify = true;
		} else {
			dir_str = "below";
		}
		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
			    dir_str, threshold_type);
		netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		if (notify) {
			bp->thermal_threshold_type = type;
			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
			return true;
		}
		return false;
	}
	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
		break;
	default:
		netdev_err(bp->dev, "FW reported unknown error type %u\n",
			   err_type);
		break;
	}
	return false;
}

#define BNXT_GET_EVENT_PORT(data)					\
	((data) &							\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

#define BNXT_EVENT_RING_TYPE(data2)					\
	((data2) &							\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)

#define BNXT_EVENT_RING_TYPE_RX(data2)					\
	(BNXT_EVENT_RING_TYPE(data2) ==					\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)

#define BNXT_EVENT_PHC_EVENT_TYPE(data1)				\
	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)

#define BNXT_EVENT_PHC_RTC_UPDATE(data1)				\
	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)

#define BNXT_PHC_BITS	48
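/* The free-running PHC counter is 48 bits wide; the PHC_RTC_UPDATE event
 * handled below supplies the missing upper bits in data1, and the full time
 * is reassembled as ns = (msb << BNXT_PHC_BITS) | ptp->current_time. Worked
 * example (illustrative values): an msb of 2 with current_time
 * 0x123456789abc gives 0x0002123456789abc.
 */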

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	u32 data1 = le32_to_cpu(cmpl->event_data1);
	u32 data2 = le32_to_cpu(cmpl->event_data2);

	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
		   event_id, data1, data2);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = bnxt_get_force_speed(link_info);
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
		fallthrough;
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
		fallthrough;
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		char *type_str = "Solicited";

		if (!bp->fw_health)
			goto async_event_process_exit;

		bp->fw_reset_timestamp = jiffies;
		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
		if (!bp->fw_reset_min_dsecs)
			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
		if (!bp->fw_reset_max_dsecs)
			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			bp->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			bp->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		}
		netif_warn(bp, hw, bp->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   bp->fw_reset_min_dsecs * 100,
			   bp->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		struct bnxt_fw_health *fw_health = bp->fw_health;
		char *status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     bp->current_interval * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		netif_info(bp, drv, bp->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and visible to
			 * bnxt_health_check() before setting enabled to true.
			 */
			smp_wmb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
		netif_notice(bp, hw, bp->dev,
			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
			     data1, data2);
		goto async_event_process_exit;
	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
		struct bnxt_rx_ring_info *rxr;
		u16 grp_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			goto async_event_process_exit;

		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
			    BNXT_EVENT_RING_TYPE(data2), data1);
		if (!BNXT_EVENT_RING_TYPE_RX(data2))
			goto async_event_process_exit;

		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
		if (grp_idx == INVALID_HW_RING_ID) {
			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
				    data1);
			goto async_event_process_exit;
		}
		rxr = bp->bnapi[grp_idx]->rx_ring;
		bnxt_sched_reset_rxr(bp, rxr);
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
		struct bnxt_fw_health *fw_health = bp->fw_health;

		netif_notice(bp, hw, bp->dev,
			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
			     data1, data2);
		if (fw_health) {
			fw_health->echo_req_data1 = data1;
			fw_health->echo_req_data2 = data2;
			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
			break;
		}
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
		bnxt_ptp_pps_event(bp, data1, data2);
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		if (bnxt_event_error_report(bp, data1, data2))
			break;
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
			if (BNXT_PTP_USE_RTC(bp)) {
				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
				unsigned long flags;
				u64 ns;

				if (!ptp)
					goto async_event_process_exit;

				bnxt_ptp_update_current_time(bp);
				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
				       BNXT_PHC_BITS) | ptp->current_time);
				write_seqlock_irqsave(&ptp->ptp_lock, flags);
				bnxt_ptp_rtc_timecounter_init(ptp, ns);
				write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
			}
			break;
		}
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;

		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
		goto async_event_process_exit;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
		u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
		u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);

		if (type >= ARRAY_SIZE(bp->bs_trace))
			goto async_event_process_exit;
		bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
		goto async_event_process_exit;
	}
	default:
		goto async_event_process_exit;
	}
	__bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

	default:
		break;
	}

	return 0;
}

static bool bnxt_vnic_is_active(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	cpr->event_ctr++;
	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	u32 raw_cons = cpr->cp_raw_cons;
	bool flush_xdp = false;
	u32 cons;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	cpr->has_more_work = 0;
	cpr->had_work_done = 1;
	while (1) {
		u8 cmp_type;
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		cmp_type = TX_CMP_TYPE(txcmp);
		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
			u32 opaque = txcmp->tx_cmp_opaque;
			struct bnxt_tx_ring_info *txr;
			u16 tx_freed;

			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
			event |= BNXT_TX_CMP_EVENT;
			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
			else
				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
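			/* Masked subtraction handles producer/consumer wrap,
			 * e.g. with tx_ring_mask 511, hw_cons 5 and cons 508:
			 * (5 - 508) & 511 = 9 descriptors freed.
			 */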
			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
				   bp->tx_ring_mask;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
				rx_pkts = budget;
				raw_cons = NEXT_RAW_CMP(raw_cons);
				if (budget)
					cpr->has_more_work = 1;
				break;
			}
		} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
			bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			else
				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
							   &event);
			if (event & BNXT_REDIRECT_EVENT)
				flush_xdp = true;
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget. Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts && rx_pkts == budget) {
			cpr->has_more_work = 1;
			break;
		}
	}

	if (flush_xdp) {
		xdp_do_flush();
		event &= ~BNXT_REDIRECT_EVENT;
	}

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
		event &= ~BNXT_TX_EVENT;
	}

	cpr->cp_raw_cons = raw_cons;
	bnapi->events |= event;
	return rx_pkts;
}

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
				  int budget)
{
	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
		bnapi->tx_int(bp, bnapi, budget);

	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bnapi->events &= ~BNXT_RX_EVENT;
	}
	if (bnapi->events & BNXT_AGG_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
		bnapi->events &= ~BNXT_AGG_EVENT;
	}
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	__bnxt_poll_work_done(bp, bnapi, budget);
	return rx_pkts;
}

static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	bool flush_xdp = false;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
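		/* The 0x30 mask checks bits 5:4 of the completion type;
		 * the 0x10 value is assumed here to match the whole RX
		 * completion family (e.g. CMP_TYPE_RX_L2_CMP and the TPA
		 * types) without enumerating each one.
		 */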
		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			if (event & BNXT_REDIRECT_EVENT)
				flush_xdp = true;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
	if (flush_xdp)
		xdp_do_flush();

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
		napi_complete(napi);
		return 0;
	}
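	/* A zero budget (netpoll) makes work_done >= budget true on the
	 * first pass, so the loop runs bnxt_poll_work() once in discard
	 * mode and re-arms the doorbell before breaking out.
	 */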
	while (1) {
		work_done += bnxt_poll_work(bp, cpr, budget - work_done);

		if (work_done >= budget) {
			if (!budget)
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}
	}
	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
		struct dim_sample dim_sample = {};

		dim_update_sample(cpr->event_ctr,
				  cpr->rx_packets,
				  cpr->rx_bytes,
				  &dim_sample);
		net_dim(&cpr->dim, &dim_sample);
	}
	return work_done;
}

static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i, work_done = 0;

	for (i = 0; i < cpr->cp_ring_count; i++) {
		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];

		if (cpr2->had_nqe_notify) {
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		}
	}
	return work_done;
}

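/* Ring the completion doorbell for every CQ that did work. With
 * DBR_TYPE_CQ_ARMALL the write also re-arms the ring's NQ entry, so
 * had_nqe_notify is cleared and the ring's toggle bit must be included;
 * a plain DBR_TYPE_CQ write just acknowledges the consumer index without
 * re-arming.
 */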
__bnxt_poll_cqs_done(struct bnxt * bp,struct bnxt_napi * bnapi,u64 dbr_type,int budget)3313 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3314 u64 dbr_type, int budget)
3315 {
3316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3317 int i;
3318
3319 for (i = 0; i < cpr->cp_ring_count; i++) {
3320 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3321 struct bnxt_db_info *db;
3322
3323 if (cpr2->had_work_done) {
3324 u32 tgl = 0;
3325
3326 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3327 cpr2->had_nqe_notify = 0;
3328 tgl = cpr2->toggle;
3329 }
3330 db = &cpr2->cp_db;
3331 bnxt_writeq(bp,
3332 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3333 DB_RING_IDX(db, cpr2->cp_raw_cons),
3334 db->doorbell);
3335 cpr2->had_work_done = 0;
3336 }
3337 }
3338 __bnxt_poll_work_done(bp, bnapi, budget);
3339 }
3340
bnxt_poll_p5(struct napi_struct * napi,int budget)3341 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3342 {
3343 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3344 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3345 struct bnxt_cp_ring_info *cpr_rx;
3346 u32 raw_cons = cpr->cp_raw_cons;
3347 struct bnxt *bp = bnapi->bp;
3348 struct nqe_cn *nqcmp;
3349 int work_done = 0;
3350 u32 cons;
3351
3352 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3353 napi_complete(napi);
3354 return 0;
3355 }
3356 if (cpr->has_more_work) {
3357 cpr->has_more_work = 0;
3358 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3359 }
3360 while (1) {
3361 u16 type;
3362
3363 cons = RING_CMP(raw_cons);
3364 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3365
3366 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3367 if (cpr->has_more_work)
3368 break;
3369
3370 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3371 budget);
3372 cpr->cp_raw_cons = raw_cons;
3373 if (napi_complete_done(napi, work_done))
3374 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3375 cpr->cp_raw_cons);
3376 goto poll_done;
3377 }
3378
3379 /* The valid test of the entry must be done before
3380 * reading any further.
3381 */
3382 dma_rmb();
3383
3384 type = le16_to_cpu(nqcmp->type);
3385 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3386 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3387 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3388 struct bnxt_cp_ring_info *cpr2;
3389
3390 /* No more budget for RX work */
3391 if (budget && work_done >= budget &&
3392 cq_type == BNXT_NQ_HDL_TYPE_RX)
3393 break;
3394
3395 idx = BNXT_NQ_HDL_IDX(idx);
3396 cpr2 = &cpr->cp_ring_arr[idx];
3397 cpr2->had_nqe_notify = 1;
3398 cpr2->toggle = NQE_CN_TOGGLE(type);
3399 work_done += __bnxt_poll_work(bp, cpr2,
3400 budget - work_done);
3401 cpr->has_more_work |= cpr2->has_more_work;
3402 } else {
3403 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3404 }
3405 raw_cons = NEXT_RAW_CMP(raw_cons);
3406 }
3407 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3408 if (raw_cons != cpr->cp_raw_cons) {
3409 cpr->cp_raw_cons = raw_cons;
3410 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3411 }
3412 poll_done:
3413 cpr_rx = &cpr->cp_ring_arr[0];
3414 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3415 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3416 struct dim_sample dim_sample = {};
3417
3418 dim_update_sample(cpr->event_ctr,
3419 cpr_rx->rx_packets,
3420 cpr_rx->rx_bytes,
3421 &dim_sample);
3422 net_dim(&cpr->dim, &dim_sample);
3423 }
3424 return work_done;
3425 }
3426
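/* Unmap and free every pending buffer on one TX ring: XDP_REDIRECT
 * frames, push-mode packets, regular skbs with their fragments, and
 * software-GSO segments tracked through tx_inline_cons.  The BQL state
 * of the corresponding netdev queue is reset at the end.
 */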
3427 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3428 struct bnxt_tx_ring_info *txr, int idx)
3429 {
3430 int i, max_idx;
3431 struct pci_dev *pdev = bp->pdev;
3432 unsigned int dma_len;
3433 dma_addr_t dma_addr;
3434
3435 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3436
3437 for (i = 0; i < max_idx;) {
3438 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3439 struct bnxt_sw_tx_bd *head_buf = tx_buf;
3440 struct sk_buff *skb;
3441 int j, last;
3442
3443 if (idx < bp->tx_nr_rings_xdp &&
3444 tx_buf->action == XDP_REDIRECT) {
3445 dma_addr = dma_unmap_addr(tx_buf, mapping);
3446 dma_len = dma_unmap_len(tx_buf, len);
3447
3448 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
3449 DMA_TO_DEVICE);
3450 xdp_return_frame(tx_buf->xdpf);
3451 tx_buf->action = 0;
3452 tx_buf->xdpf = NULL;
3453 i++;
3454 continue;
3455 }
3456
3457 skb = tx_buf->skb;
3458 if (!skb) {
3459 i++;
3460 continue;
3461 }
3462
3463 tx_buf->skb = NULL;
3464
3465 if (tx_buf->is_push) {
3466 dev_kfree_skb(skb);
3467 i += 2;
3468 continue;
3469 }
3470
3471 if (dma_unmap_len(tx_buf, len)) {
3472 dma_addr = dma_unmap_addr(tx_buf, mapping);
3473 dma_len = dma_unmap_len(tx_buf, len);
3474
3475 dma_unmap_single(&pdev->dev, dma_addr, dma_len,
3476 DMA_TO_DEVICE);
3477 }
3478
3479 last = tx_buf->nr_frags;
3480 i += 2;
3481 for (j = 0; j < last; j++, i++) {
3482 int ring_idx = i & bp->tx_ring_mask;
3483
3484 tx_buf = &txr->tx_buf_ring[ring_idx];
3485 if (dma_unmap_len(tx_buf, len)) {
3486 dma_addr = dma_unmap_addr(tx_buf, mapping);
3487 dma_len = dma_unmap_len(tx_buf, len);
3488
3489 netmem_dma_unmap_page_attrs(&pdev->dev,
3490 dma_addr, dma_len,
3491 DMA_TO_DEVICE, 0);
3492 }
3493 }
3494 if (head_buf->is_sw_gso) {
3495 u16 inline_cons = txr->tx_inline_cons + 1;
3496
3497 WRITE_ONCE(txr->tx_inline_cons, inline_cons);
3498 if (head_buf->is_sw_gso == BNXT_SW_GSO_LAST) {
3499 tso_dma_map_complete(&pdev->dev,
3500 &head_buf->sw_gso_cstate);
3501 } else {
3502 skb = NULL;
3503 }
3504 head_buf->is_sw_gso = 0;
3505 }
3506 if (skb)
3507 dev_kfree_skb(skb);
3508 }
3509 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3510 }
3511
3512 static void bnxt_free_tx_skbs(struct bnxt *bp)
3513 {
3514 int i;
3515
3516 if (!bp->tx_ring)
3517 return;
3518
3519 for (i = 0; i < bp->tx_nr_rings; i++) {
3520 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3521
3522 if (!txr->tx_buf_ring)
3523 continue;
3524
3525 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3526 }
3527
3528 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3529 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3530 }
3531
3532 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3533 {
3534 int i, max_idx;
3535
3536 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3537
3538 for (i = 0; i < max_idx; i++) {
3539 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3540 void *data = rx_buf->data;
3541
3542 if (!data)
3543 continue;
3544
3545 rx_buf->data = NULL;
3546 if (BNXT_RX_PAGE_MODE(bp))
3547 page_pool_recycle_direct(rxr->page_pool, data);
3548 else
3549 page_pool_free_va(rxr->head_pool, data, true);
3550 }
3551 }
3552
3553 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3554 {
3555 int i, max_idx;
3556
3557 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3558
3559 for (i = 0; i < max_idx; i++) {
3560 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3561 netmem_ref netmem = rx_agg_buf->netmem;
3562
3563 if (!netmem)
3564 continue;
3565
3566 rx_agg_buf->netmem = 0;
3567 __clear_bit(i, rxr->rx_agg_bmap);
3568
3569 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3570 }
3571 }
3572
3573 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3574 struct bnxt_rx_ring_info *rxr)
3575 {
3576 int i;
3577
3578 for (i = 0; i < bp->max_tpa; i++) {
3579 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3580 u8 *data = tpa_info->data;
3581
3582 if (!data)
3583 continue;
3584
3585 tpa_info->data = NULL;
3586 page_pool_free_va(rxr->head_pool, data, false);
3587 }
3588 }
3589
3590 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3591 struct bnxt_rx_ring_info *rxr)
3592 {
3593 struct bnxt_tpa_idx_map *map;
3594
3595 if (!rxr->rx_tpa)
3596 goto skip_rx_tpa_free;
3597
3598 bnxt_free_one_tpa_info_data(bp, rxr);
3599
3600 skip_rx_tpa_free:
3601 if (!rxr->rx_buf_ring)
3602 goto skip_rx_buf_free;
3603
3604 bnxt_free_one_rx_ring(bp, rxr);
3605
3606 skip_rx_buf_free:
3607 if (!rxr->rx_agg_ring)
3608 goto skip_rx_agg_free;
3609
3610 bnxt_free_one_rx_agg_ring(bp, rxr);
3611
3612 skip_rx_agg_free:
3613 map = rxr->rx_tpa_idx_map;
3614 if (map)
3615 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3616 }
3617
3618 static void bnxt_free_rx_skbs(struct bnxt *bp)
3619 {
3620 int i;
3621
3622 if (!bp->rx_ring)
3623 return;
3624
3625 for (i = 0; i < bp->rx_nr_rings; i++)
3626 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3627 }
3628
3629 static void bnxt_free_skbs(struct bnxt *bp)
3630 {
3631 bnxt_free_tx_skbs(bp);
3632 bnxt_free_rx_skbs(bp);
3633 }
3634
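/* Pre-fill a block of firmware context memory.  Without a per-entry
 * offset the whole block is set to the init value; otherwise only the
 * byte at init_offset within each entry is written.
 */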
3635 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3636 {
3637 u8 init_val = ctxm->init_value;
3638 u16 offset = ctxm->init_offset;
3639 u8 *p2 = p;
3640 int i;
3641
3642 if (!init_val)
3643 return;
3644 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3645 memset(p, init_val, len);
3646 return;
3647 }
3648 for (i = 0; i < len; i += ctxm->entry_size)
3649 *(p2 + i + offset) = init_val;
3650 }
3651
3652 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3653 void *buf, size_t offset, size_t head,
3654 size_t tail)
3655 {
3656 int i, head_page, start_idx, source_offset;
3657 size_t len, rem_len, total_len, max_bytes;
3658
3659 head_page = head / rmem->page_size;
3660 source_offset = head % rmem->page_size;
3661 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3662 if (!total_len)
3663 total_len = MAX_CTX_BYTES;
3664 start_idx = head_page % MAX_CTX_PAGES;
3665 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3666 source_offset;
3667 total_len = min(total_len, max_bytes);
3668 rem_len = total_len;
3669
3670 for (i = start_idx; rem_len; i++, source_offset = 0) {
3671 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3672 if (buf)
3673 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3674 len);
3675 offset += len;
3676 rem_len -= len;
3677 }
3678 return total_len;
3679 }
3680
3681 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3682 {
3683 struct pci_dev *pdev = bp->pdev;
3684 int i;
3685
3686 if (!rmem->pg_arr)
3687 goto skip_pages;
3688
3689 for (i = 0; i < rmem->nr_pages; i++) {
3690 if (!rmem->pg_arr[i])
3691 continue;
3692
3693 dma_free_coherent(&pdev->dev, rmem->page_size,
3694 rmem->pg_arr[i], rmem->dma_arr[i]);
3695
3696 rmem->pg_arr[i] = NULL;
3697 }
3698 skip_pages:
3699 if (rmem->pg_tbl) {
3700 size_t pg_tbl_size = rmem->nr_pages * 8;
3701
3702 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3703 pg_tbl_size = rmem->page_size;
3704 dma_free_coherent(&pdev->dev, pg_tbl_size,
3705 rmem->pg_tbl, rmem->pg_tbl_map);
3706 rmem->pg_tbl = NULL;
3707 }
3708 if (rmem->vmem_size && *rmem->vmem) {
3709 vfree(*rmem->vmem);
3710 *rmem->vmem = NULL;
3711 }
3712 }
3713
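/* Allocate the DMA-coherent pages backing a ring, plus a page table
 * when the ring spans multiple pages or uses a deeper paging level.
 * Ring page-table entries carry the PTU valid bit and next-to-last/last
 * markers; the software shadow array (vmem) is allocated last.
 */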
3714 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3715 {
3716 struct pci_dev *pdev = bp->pdev;
3717 u64 valid_bit = 0;
3718 int i;
3719
3720 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3721 valid_bit = PTU_PTE_VALID;
3722 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3723 size_t pg_tbl_size = rmem->nr_pages * 8;
3724
3725 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3726 pg_tbl_size = rmem->page_size;
3727 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3728 &rmem->pg_tbl_map,
3729 GFP_KERNEL);
3730 if (!rmem->pg_tbl)
3731 return -ENOMEM;
3732 }
3733
3734 for (i = 0; i < rmem->nr_pages; i++) {
3735 u64 extra_bits = valid_bit;
3736
3737 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3738 rmem->page_size,
3739 &rmem->dma_arr[i],
3740 GFP_KERNEL);
3741 if (!rmem->pg_arr[i])
3742 return -ENOMEM;
3743
3744 if (rmem->ctx_mem)
3745 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3746 rmem->page_size);
3747 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3748 if (i == rmem->nr_pages - 2 &&
3749 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3750 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3751 else if (i == rmem->nr_pages - 1 &&
3752 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3753 extra_bits |= PTU_PTE_LAST;
3754 rmem->pg_tbl[i] =
3755 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3756 }
3757 }
3758
3759 if (rmem->vmem_size) {
3760 *rmem->vmem = vzalloc(rmem->vmem_size);
3761 if (!(*rmem->vmem))
3762 return -ENOMEM;
3763 }
3764 return 0;
3765 }
3766
3767 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3768 struct bnxt_rx_ring_info *rxr)
3769 {
3770 int i;
3771
3772 kfree(rxr->rx_tpa_idx_map);
3773 rxr->rx_tpa_idx_map = NULL;
3774 if (rxr->rx_tpa) {
3775 for (i = 0; i < bp->max_tpa; i++) {
3776 kfree(rxr->rx_tpa[i].agg_arr);
3777 rxr->rx_tpa[i].agg_arr = NULL;
3778 }
3779 }
3780 kfree(rxr->rx_tpa);
3781 rxr->rx_tpa = NULL;
3782 }
3783
3784 static void bnxt_free_tpa_info(struct bnxt *bp)
3785 {
3786 int i;
3787
3788 for (i = 0; i < bp->rx_nr_rings; i++) {
3789 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3790
3791 bnxt_free_one_tpa_info(bp, rxr);
3792 }
3793 }
3794
3795 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3796 struct bnxt_rx_ring_info *rxr)
3797 {
3798 struct rx_agg_cmp *agg;
3799 int i;
3800
3801 rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3802 if (!rxr->rx_tpa)
3803 return -ENOMEM;
3804
3805 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3806 return 0;
3807 for (i = 0; i < bp->max_tpa; i++) {
3808 agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3809 if (!agg)
3810 return -ENOMEM;
3811 rxr->rx_tpa[i].agg_arr = agg;
3812 }
3813 rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3814 if (!rxr->rx_tpa_idx_map)
3815 return -ENOMEM;
3816
3817 return 0;
3818 }
3819
3820 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3821 {
3822 int i, rc;
3823
3824 bp->max_tpa = MAX_TPA;
3825 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3826 if (!bp->max_tpa_v2)
3827 return 0;
3828 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3829 }
3830
3831 for (i = 0; i < bp->rx_nr_rings; i++) {
3832 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3833
3834 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3835 if (rc)
3836 return rc;
3837 }
3838 return 0;
3839 }
3840
3841 static void bnxt_free_rx_rings(struct bnxt *bp)
3842 {
3843 int i;
3844
3845 if (!bp->rx_ring)
3846 return;
3847
3848 bnxt_free_tpa_info(bp);
3849 for (i = 0; i < bp->rx_nr_rings; i++) {
3850 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3851 struct bnxt_ring_struct *ring;
3852
3853 if (rxr->xdp_prog)
3854 bpf_prog_put(rxr->xdp_prog);
3855
3856 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3857 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3858
3859 page_pool_destroy(rxr->page_pool);
3860 page_pool_destroy(rxr->head_pool);
3861 rxr->page_pool = rxr->head_pool = NULL;
3862
3863 kfree(rxr->rx_agg_bmap);
3864 rxr->rx_agg_bmap = NULL;
3865
3866 ring = &rxr->rx_ring_struct;
3867 bnxt_free_ring(bp, &ring->ring_mem);
3868
3869 ring = &rxr->rx_agg_ring_struct;
3870 bnxt_free_ring(bp, &ring->ring_mem);
3871 }
3872 }
3873
3874 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3875 struct bnxt_rx_ring_info *rxr)
3876 {
3877 /* The user may have chosen a larger than default rx_page_size.
3878 * We keep the ring sizes uniform and also want a uniform amount
3879 * of bytes consumed per ring, so cap how much of each ring we fill.
3880 */
3881 int fill_level = bp->rx_agg_ring_size;
3882
3883 if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3884 fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3885
3886 return fill_level;
3887 }
3888
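/* Create the page pool backing one RX ring.  The pool is sized from the
 * aggregation ring fill level (plus the data ring in page mode) and may
 * hand out unreadable netmem.  When headers cannot come from that pool
 * (unreadable netmem or high-order pages), a smaller head pool is
 * created; otherwise the main pool is reference-counted and doubles as
 * the head pool.
 */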
3889 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3890 struct bnxt_rx_ring_info *rxr,
3891 int numa_node)
3892 {
3893 unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3894 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3895 struct page_pool_params pp = { 0 };
3896 struct page_pool *pool;
3897
3898 pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3899 if (BNXT_RX_PAGE_MODE(bp))
3900 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3901
3902 pp.order = get_order(rxr->rx_page_size);
3903 pp.nid = numa_node;
3904 pp.netdev = bp->dev;
3905 pp.dev = &bp->pdev->dev;
3906 pp.dma_dir = bp->rx_dir;
3907 pp.max_len = PAGE_SIZE << pp.order;
3908 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3909 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3910 pp.queue_idx = rxr->bnapi->index;
3911
3912 pool = page_pool_create(&pp);
3913 if (IS_ERR(pool))
3914 return PTR_ERR(pool);
3915 rxr->page_pool = pool;
3916
3917 rxr->need_head_pool = page_pool_is_unreadable(pool);
3918 rxr->need_head_pool |= !!pp.order;
3919 if (bnxt_separate_head_pool(rxr)) {
3920 pp.order = 0;
3921 pp.max_len = PAGE_SIZE;
3922 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3923 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3924 pool = page_pool_create(&pp);
3925 if (IS_ERR(pool))
3926 goto err_destroy_pp;
3927 } else {
3928 page_pool_get(pool);
3929 }
3930 rxr->head_pool = pool;
3931
3932 return 0;
3933
3934 err_destroy_pp:
3935 page_pool_destroy(rxr->page_pool);
3936 rxr->page_pool = NULL;
3937 return PTR_ERR(pool);
3938 }
3939
3940 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3941 {
3942 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3943 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3944 }
3945
3946 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3947 {
3948 u16 mem_size;
3949
3950 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3951 mem_size = rxr->rx_agg_bmap_size / 8;
3952 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3953 if (!rxr->rx_agg_bmap)
3954 return -ENOMEM;
3955
3956 return 0;
3957 }
3958
3959 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3960 {
3961 int numa_node = dev_to_node(&bp->pdev->dev);
3962 int i, rc = 0, agg_rings = 0, cpu;
3963
3964 if (!bp->rx_ring)
3965 return -ENOMEM;
3966
3967 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3968 agg_rings = 1;
3969
3970 for (i = 0; i < bp->rx_nr_rings; i++) {
3971 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3972 struct bnxt_ring_struct *ring;
3973 int cpu_node;
3974
3975 ring = &rxr->rx_ring_struct;
3976
3977 cpu = cpumask_local_spread(i, numa_node);
3978 cpu_node = cpu_to_node(cpu);
3979 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3980 i, cpu_node);
3981 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3982 if (rc)
3983 return rc;
3984 bnxt_enable_rx_page_pool(rxr);
3985
3986 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3987 if (rc < 0)
3988 return rc;
3989
3990 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3991 MEM_TYPE_PAGE_POOL,
3992 rxr->page_pool);
3993 if (rc) {
3994 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3995 return rc;
3996 }
3997
3998 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3999 if (rc)
4000 return rc;
4001
4002 ring->grp_idx = i;
4003 if (agg_rings) {
4004 ring = &rxr->rx_agg_ring_struct;
4005 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4006 if (rc)
4007 return rc;
4008
4009 ring->grp_idx = i;
4010 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
4011 if (rc)
4012 return rc;
4013 }
4014 }
4015 if (bp->flags & BNXT_FLAG_TPA)
4016 rc = bnxt_alloc_tpa_info(bp);
4017 return rc;
4018 }
4019
4020 static void bnxt_free_tx_inline_buf(struct bnxt_tx_ring_info *txr,
4021 struct pci_dev *pdev)
4022 {
4023 if (!txr->tx_inline_buf)
4024 return;
4025
4026 dma_unmap_single(&pdev->dev, txr->tx_inline_dma,
4027 txr->tx_inline_size, DMA_TO_DEVICE);
4028 kfree(txr->tx_inline_buf);
4029 txr->tx_inline_buf = NULL;
4030 txr->tx_inline_size = 0;
4031 }
4032
4033 static int bnxt_alloc_tx_inline_buf(struct bnxt_tx_ring_info *txr,
4034 struct pci_dev *pdev,
4035 unsigned int size)
4036 {
4037 txr->tx_inline_buf = kmalloc(size, GFP_KERNEL);
4038 if (!txr->tx_inline_buf)
4039 return -ENOMEM;
4040
4041 txr->tx_inline_dma = dma_map_single(&pdev->dev, txr->tx_inline_buf,
4042 size, DMA_TO_DEVICE);
4043 if (dma_mapping_error(&pdev->dev, txr->tx_inline_dma)) {
4044 kfree(txr->tx_inline_buf);
4045 txr->tx_inline_buf = NULL;
4046 return -ENOMEM;
4047 }
4048 txr->tx_inline_size = size;
4049
4050 return 0;
4051 }
4052
4053 static void bnxt_free_tx_rings(struct bnxt *bp)
4054 {
4055 int i;
4056 struct pci_dev *pdev = bp->pdev;
4057
4058 if (!bp->tx_ring)
4059 return;
4060
4061 for (i = 0; i < bp->tx_nr_rings; i++) {
4062 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4063 struct bnxt_ring_struct *ring;
4064
4065 if (txr->tx_push) {
4066 dma_free_coherent(&pdev->dev, bp->tx_push_size,
4067 txr->tx_push, txr->tx_push_mapping);
4068 txr->tx_push = NULL;
4069 }
4070
4071 bnxt_free_tx_inline_buf(txr, pdev);
4072
4073 ring = &txr->tx_ring_struct;
4074
4075 bnxt_free_ring(bp, &ring->ring_mem);
4076 }
4077 }
4078
4079 #define BNXT_TC_TO_RING_BASE(bp, tc) \
4080 ((tc) * (bp)->tx_nr_rings_per_tc)
4081
4082 #define BNXT_RING_TO_TC_OFF(bp, tx) \
4083 ((tx) % (bp)->tx_nr_rings_per_tc)
4084
4085 #define BNXT_RING_TO_TC(bp, tx) \
4086 ((tx) / (bp)->tx_nr_rings_per_tc)
4087
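/* Allocate all TX rings.  The optional push buffer is limited to 256
 * bytes including the BD header, an inline header buffer is added for
 * software UDP GSO when the hardware lacks that capability, and each
 * non-XDP ring is mapped to its hardware queue id via its traffic
 * class.
 */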
4088 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4089 {
4090 int i, j, rc;
4091 struct pci_dev *pdev = bp->pdev;
4092
4093 bp->tx_push_size = 0;
4094 if (bp->tx_push_thresh) {
4095 int push_size;
4096
4097 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4098 bp->tx_push_thresh);
4099
4100 if (push_size > 256) {
4101 push_size = 0;
4102 bp->tx_push_thresh = 0;
4103 }
4104
4105 bp->tx_push_size = push_size;
4106 }
4107
4108 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4109 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4110 struct bnxt_ring_struct *ring;
4111 u8 qidx;
4112
4113 ring = &txr->tx_ring_struct;
4114
4115 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4116 if (rc)
4117 return rc;
4118
4119 ring->grp_idx = txr->bnapi->index;
4120 if (bp->tx_push_size) {
4121 dma_addr_t mapping;
4122
4123 /* One pre-allocated DMA buffer to back up
4124 * the TX push operation
4125 */
4126 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4127 bp->tx_push_size,
4128 &txr->tx_push_mapping,
4129 GFP_KERNEL);
4130
4131 if (!txr->tx_push)
4132 return -ENOMEM;
4133
4134 mapping = txr->tx_push_mapping +
4135 sizeof(struct tx_push_bd);
4136 txr->data_mapping = cpu_to_le64(mapping);
4137 }
4138 if (!(bp->flags & BNXT_FLAG_UDP_GSO_CAP)) {
4139 rc = bnxt_alloc_tx_inline_buf(txr, pdev,
4140 BNXT_SW_USO_MAX_SEGS *
4141 TSO_HEADER_SIZE);
4142 if (rc)
4143 return rc;
4144 }
4145 qidx = bp->tc_to_qidx[j];
4146 ring->queue_id = bp->q_info[qidx].queue_id;
4147 spin_lock_init(&txr->xdp_tx_lock);
4148 if (i < bp->tx_nr_rings_xdp)
4149 continue;
4150 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4151 j++;
4152 }
4153 return 0;
4154 }
4155
4156 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4157 {
4158 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4159
4160 kfree(cpr->cp_desc_ring);
4161 cpr->cp_desc_ring = NULL;
4162 ring->ring_mem.pg_arr = NULL;
4163 kfree(cpr->cp_desc_mapping);
4164 cpr->cp_desc_mapping = NULL;
4165 ring->ring_mem.dma_arr = NULL;
4166 }
4167
4168 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4169 {
4170 cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4171 if (!cpr->cp_desc_ring)
4172 return -ENOMEM;
4173 cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4174 if (!cpr->cp_desc_mapping)
4175 return -ENOMEM;
4176 return 0;
4177 }
4178
4179 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4180 {
4181 int i;
4182
4183 if (!bp->bnapi)
4184 return;
4185 for (i = 0; i < bp->cp_nr_rings; i++) {
4186 struct bnxt_napi *bnapi = bp->bnapi[i];
4187
4188 if (!bnapi)
4189 continue;
4190 bnxt_free_cp_arrays(&bnapi->cp_ring);
4191 }
4192 }
4193
4194 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4195 {
4196 int i, n = bp->cp_nr_pages;
4197
4198 for (i = 0; i < bp->cp_nr_rings; i++) {
4199 struct bnxt_napi *bnapi = bp->bnapi[i];
4200 int rc;
4201
4202 if (!bnapi)
4203 continue;
4204 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4205 if (rc)
4206 return rc;
4207 }
4208 return 0;
4209 }
4210
4211 static void bnxt_free_cp_rings(struct bnxt *bp)
4212 {
4213 int i;
4214
4215 if (!bp->bnapi)
4216 return;
4217
4218 for (i = 0; i < bp->cp_nr_rings; i++) {
4219 struct bnxt_napi *bnapi = bp->bnapi[i];
4220 struct bnxt_cp_ring_info *cpr;
4221 struct bnxt_ring_struct *ring;
4222 int j;
4223
4224 if (!bnapi)
4225 continue;
4226
4227 cpr = &bnapi->cp_ring;
4228 ring = &cpr->cp_ring_struct;
4229
4230 bnxt_free_ring(bp, &ring->ring_mem);
4231
4232 if (!cpr->cp_ring_arr)
4233 continue;
4234
4235 for (j = 0; j < cpr->cp_ring_count; j++) {
4236 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4237
4238 ring = &cpr2->cp_ring_struct;
4239 bnxt_free_ring(bp, &ring->ring_mem);
4240 bnxt_free_cp_arrays(cpr2);
4241 }
4242 kfree(cpr->cp_ring_arr);
4243 cpr->cp_ring_arr = NULL;
4244 cpr->cp_ring_count = 0;
4245 }
4246 }
4247
4248 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4249 struct bnxt_cp_ring_info *cpr)
4250 {
4251 struct bnxt_ring_mem_info *rmem;
4252 struct bnxt_ring_struct *ring;
4253 int rc;
4254
4255 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4256 if (rc) {
4257 bnxt_free_cp_arrays(cpr);
4258 return -ENOMEM;
4259 }
4260 ring = &cpr->cp_ring_struct;
4261 rmem = &ring->ring_mem;
4262 rmem->nr_pages = bp->cp_nr_pages;
4263 rmem->page_size = HW_CMPD_RING_SIZE;
4264 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4265 rmem->dma_arr = cpr->cp_desc_mapping;
4266 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4267 rc = bnxt_alloc_ring(bp, rmem);
4268 if (rc) {
4269 bnxt_free_ring(bp, rmem);
4270 bnxt_free_cp_arrays(cpr);
4271 }
4272 return rc;
4273 }
4274
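/* Allocate the per-NAPI completion (NQ) rings.  On NQ-based chips each
 * NAPI instance also gets an array of sub completion rings: one for RX
 * when the index maps to an RX ring, one for XDP TX, or one per traffic
 * class for regular TX.  Each sub-ring is linked back to its owning
 * rx_ring or tx_ring.
 */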
4275 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4276 {
4277 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4278 int i, j, rc, ulp_msix;
4279 int tcs = bp->num_tc;
4280
4281 if (!tcs)
4282 tcs = 1;
4283 ulp_msix = bnxt_get_ulp_msix_num(bp);
4284 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4285 struct bnxt_napi *bnapi = bp->bnapi[i];
4286 struct bnxt_cp_ring_info *cpr, *cpr2;
4287 struct bnxt_ring_struct *ring;
4288 int cp_count = 0, k;
4289 int rx = 0, tx = 0;
4290
4291 if (!bnapi)
4292 continue;
4293
4294 cpr = &bnapi->cp_ring;
4295 cpr->bnapi = bnapi;
4296 ring = &cpr->cp_ring_struct;
4297
4298 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4299 if (rc)
4300 return rc;
4301
4302 ring->map_idx = ulp_msix + i;
4303
4304 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4305 continue;
4306
4307 if (i < bp->rx_nr_rings) {
4308 cp_count++;
4309 rx = 1;
4310 }
4311 if (i < bp->tx_nr_rings_xdp) {
4312 cp_count++;
4313 tx = 1;
4314 } else if ((sh && i < bp->tx_nr_rings) ||
4315 (!sh && i >= bp->rx_nr_rings)) {
4316 cp_count += tcs;
4317 tx = 1;
4318 }
4319
4320 cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4321 if (!cpr->cp_ring_arr)
4322 return -ENOMEM;
4323 cpr->cp_ring_count = cp_count;
4324
4325 for (k = 0; k < cp_count; k++) {
4326 cpr2 = &cpr->cp_ring_arr[k];
4327 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4328 if (rc)
4329 return rc;
4330 cpr2->bnapi = bnapi;
4331 cpr2->sw_stats = cpr->sw_stats;
4332 cpr2->cp_idx = k;
4333 if (!k && rx) {
4334 bp->rx_ring[i].rx_cpr = cpr2;
4335 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4336 } else {
4337 int n, tc = k - rx;
4338
4339 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4340 bp->tx_ring[n].tx_cpr = cpr2;
4341 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4342 }
4343 }
4344 if (tx)
4345 j++;
4346 }
4347 return 0;
4348 }
4349
4350 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4351 struct bnxt_rx_ring_info *rxr)
4352 {
4353 struct bnxt_ring_mem_info *rmem;
4354 struct bnxt_ring_struct *ring;
4355
4356 ring = &rxr->rx_ring_struct;
4357 rmem = &ring->ring_mem;
4358 rmem->nr_pages = bp->rx_nr_pages;
4359 rmem->page_size = HW_RXBD_RING_SIZE;
4360 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4361 rmem->dma_arr = rxr->rx_desc_mapping;
4362 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4363 rmem->vmem = (void **)&rxr->rx_buf_ring;
4364
4365 ring = &rxr->rx_agg_ring_struct;
4366 rmem = &ring->ring_mem;
4367 rmem->nr_pages = bp->rx_agg_nr_pages;
4368 rmem->page_size = HW_RXBD_RING_SIZE;
4369 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4370 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4371 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4372 rmem->vmem = (void **)&rxr->rx_agg_ring;
4373 }
4374
4375 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4376 struct bnxt_rx_ring_info *rxr)
4377 {
4378 struct bnxt_ring_mem_info *rmem;
4379 struct bnxt_ring_struct *ring;
4380 int i;
4381
4382 rxr->page_pool->p.napi = NULL;
4383 rxr->page_pool = NULL;
4384 rxr->head_pool->p.napi = NULL;
4385 rxr->head_pool = NULL;
4386 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4387
4388 ring = &rxr->rx_ring_struct;
4389 rmem = &ring->ring_mem;
4390 rmem->pg_tbl = NULL;
4391 rmem->pg_tbl_map = 0;
4392 for (i = 0; i < rmem->nr_pages; i++) {
4393 rmem->pg_arr[i] = NULL;
4394 rmem->dma_arr[i] = 0;
4395 }
4396 *rmem->vmem = NULL;
4397
4398 ring = &rxr->rx_agg_ring_struct;
4399 rmem = &ring->ring_mem;
4400 rmem->pg_tbl = NULL;
4401 rmem->pg_tbl_map = 0;
4402 for (i = 0; i < rmem->nr_pages; i++) {
4403 rmem->pg_arr[i] = NULL;
4404 rmem->dma_arr[i] = 0;
4405 }
4406 *rmem->vmem = NULL;
4407 }
4408
4409 static void bnxt_init_ring_struct(struct bnxt *bp)
4410 {
4411 int i, j;
4412
4413 for (i = 0; i < bp->cp_nr_rings; i++) {
4414 struct bnxt_napi *bnapi = bp->bnapi[i];
4415 struct netdev_queue_config qcfg;
4416 struct bnxt_ring_mem_info *rmem;
4417 struct bnxt_cp_ring_info *cpr;
4418 struct bnxt_rx_ring_info *rxr;
4419 struct bnxt_tx_ring_info *txr;
4420 struct bnxt_ring_struct *ring;
4421
4422 if (!bnapi)
4423 continue;
4424
4425 cpr = &bnapi->cp_ring;
4426 ring = &cpr->cp_ring_struct;
4427 rmem = &ring->ring_mem;
4428 rmem->nr_pages = bp->cp_nr_pages;
4429 rmem->page_size = HW_CMPD_RING_SIZE;
4430 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4431 rmem->dma_arr = cpr->cp_desc_mapping;
4432 rmem->vmem_size = 0;
4433
4434 rxr = bnapi->rx_ring;
4435 if (!rxr)
4436 goto skip_rx;
4437
4438 netdev_queue_config(bp->dev, i, &qcfg);
4439 rxr->rx_page_size = qcfg.rx_page_size;
4440
4441 ring = &rxr->rx_ring_struct;
4442 rmem = &ring->ring_mem;
4443 rmem->nr_pages = bp->rx_nr_pages;
4444 rmem->page_size = HW_RXBD_RING_SIZE;
4445 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4446 rmem->dma_arr = rxr->rx_desc_mapping;
4447 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4448 rmem->vmem = (void **)&rxr->rx_buf_ring;
4449
4450 ring = &rxr->rx_agg_ring_struct;
4451 rmem = &ring->ring_mem;
4452 rmem->nr_pages = bp->rx_agg_nr_pages;
4453 rmem->page_size = HW_RXBD_RING_SIZE;
4454 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4455 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4456 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4457 rmem->vmem = (void **)&rxr->rx_agg_ring;
4458
4459 skip_rx:
4460 bnxt_for_each_napi_tx(j, bnapi, txr) {
4461 ring = &txr->tx_ring_struct;
4462 rmem = &ring->ring_mem;
4463 rmem->nr_pages = bp->tx_nr_pages;
4464 rmem->page_size = HW_TXBD_RING_SIZE;
4465 rmem->pg_arr = (void **)txr->tx_desc_ring;
4466 rmem->dma_arr = txr->tx_desc_mapping;
4467 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4468 rmem->vmem = (void **)&txr->tx_buf_ring;
4469 }
4470 }
4471 }
4472
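/* Stamp every RX BD on the ring pages with the given length/type flags
 * and record the running producer index in rx_bd_opaque.
 */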
4473 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4474 {
4475 int i;
4476 u32 prod;
4477 struct rx_bd **rx_buf_ring;
4478
4479 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4480 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4481 int j;
4482 struct rx_bd *rxbd;
4483
4484 rxbd = rx_buf_ring[i];
4485 if (!rxbd)
4486 continue;
4487
4488 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4489 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4490 rxbd->rx_bd_opaque = prod;
4491 }
4492 }
4493 }
4494
4495 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4496 struct bnxt_rx_ring_info *rxr,
4497 int ring_nr)
4498 {
4499 u32 prod;
4500 int i;
4501
4502 prod = rxr->rx_prod;
4503 for (i = 0; i < bp->rx_ring_size; i++) {
4504 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4505 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4506 ring_nr, i, bp->rx_ring_size);
4507 break;
4508 }
4509 prod = NEXT_RX(prod);
4510 }
4511 rxr->rx_prod = prod;
4512 }
4513
4514 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4515 struct bnxt_rx_ring_info *rxr,
4516 int ring_nr)
4517 {
4518 int fill_level, i;
4519 u32 prod;
4520
4521 fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4522
4523 prod = rxr->rx_agg_prod;
4524 for (i = 0; i < fill_level; i++) {
4525 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4526 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4527 ring_nr, i, bp->rx_agg_ring_size);
4528 break;
4529 }
4530 prod = NEXT_RX_AGG(prod);
4531 }
4532 rxr->rx_agg_prod = prod;
4533 }
4534
4535 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4536 struct bnxt_rx_ring_info *rxr)
4537 {
4538 dma_addr_t mapping;
4539 u8 *data;
4540 int i;
4541
4542 for (i = 0; i < bp->max_tpa; i++) {
4543 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4544 GFP_KERNEL);
4545 if (!data)
4546 return -ENOMEM;
4547
4548 rxr->rx_tpa[i].data = data;
4549 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4550 rxr->rx_tpa[i].mapping = mapping;
4551 }
4552
4553 return 0;
4554 }
4555
4556 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4557 {
4558 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4559 int rc;
4560
4561 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4562
4563 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4564 return 0;
4565
4566 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4567
4568 if (rxr->rx_tpa) {
4569 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4570 if (rc)
4571 return rc;
4572 }
4573 return 0;
4574 }
4575
4576 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4577 struct bnxt_rx_ring_info *rxr)
4578 {
4579 struct bnxt_ring_struct *ring;
4580 u32 type;
4581
4582 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4583 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4584
4585 if (NET_IP_ALIGN == 2)
4586 type |= RX_BD_FLAGS_SOP;
4587
4588 ring = &rxr->rx_ring_struct;
4589 bnxt_init_rxbd_pages(ring, type);
4590 ring->fw_ring_id = INVALID_HW_RING_ID;
4591 }
4592
4593 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4594 struct bnxt_rx_ring_info *rxr)
4595 {
4596 struct bnxt_ring_struct *ring;
4597 u32 type;
4598
4599 ring = &rxr->rx_agg_ring_struct;
4600 ring->fw_ring_id = INVALID_HW_RING_ID;
4601 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4602 type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4603 RX_BD_TYPE_RX_AGG_BD;
4604
4605 /* On P7, setting EOP will cause the chip to disable
4606 * Relaxed Ordering (RO) for TPA data. Disable EOP for
4607 * potentially higher performance with RO.
4608 */
4609 if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4610 type |= RX_BD_FLAGS_AGG_EOP;
4611
4612 bnxt_init_rxbd_pages(ring, type);
4613 }
4614 }
4615
4616 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4617 {
4618 struct bnxt_rx_ring_info *rxr;
4619
4620 rxr = &bp->rx_ring[ring_nr];
4621 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4622
4623 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4624 &rxr->bnapi->napi);
4625
4626 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4627 bpf_prog_add(bp->xdp_prog, 1);
4628 rxr->xdp_prog = bp->xdp_prog;
4629 }
4630
4631 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4632
4633 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4634 }
4635
4636 static void bnxt_init_cp_rings(struct bnxt *bp)
4637 {
4638 int i, j;
4639
4640 for (i = 0; i < bp->cp_nr_rings; i++) {
4641 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4642 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4643
4644 ring->fw_ring_id = INVALID_HW_RING_ID;
4645 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4646 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4647 if (!cpr->cp_ring_arr)
4648 continue;
4649 for (j = 0; j < cpr->cp_ring_count; j++) {
4650 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4651
4652 ring = &cpr2->cp_ring_struct;
4653 ring->fw_ring_id = INVALID_HW_RING_ID;
4654 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4655 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4656 }
4657 }
4658 }
4659
4660 static int bnxt_init_rx_rings(struct bnxt *bp)
4661 {
4662 int i, rc = 0;
4663
4664 if (BNXT_RX_PAGE_MODE(bp)) {
4665 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4666 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4667 } else {
4668 bp->rx_offset = BNXT_RX_OFFSET;
4669 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4670 }
4671
4672 for (i = 0; i < bp->rx_nr_rings; i++) {
4673 rc = bnxt_init_one_rx_ring(bp, i);
4674 if (rc)
4675 break;
4676 }
4677
4678 return rc;
4679 }
4680
4681 static int bnxt_init_tx_rings(struct bnxt *bp)
4682 {
4683 netdev_features_t features;
4684 u16 i;
4685
4686 features = bp->dev->features;
4687
4688 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4689 bnxt_min_tx_desc_cnt(bp, features));
4690
4691 for (i = 0; i < bp->tx_nr_rings; i++) {
4692 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4693 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4694
4695 ring->fw_ring_id = INVALID_HW_RING_ID;
4696
4697 if (i >= bp->tx_nr_rings_xdp)
4698 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4699 NETDEV_QUEUE_TYPE_TX,
4700 &txr->bnapi->napi);
4701 }
4702
4703 return 0;
4704 }
4705
4706 static void bnxt_free_ring_grps(struct bnxt *bp)
4707 {
4708 kfree(bp->grp_info);
4709 bp->grp_info = NULL;
4710 }
4711
4712 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4713 {
4714 int i;
4715
4716 if (irq_re_init) {
4717 bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4718 bp->cp_nr_rings);
4719 if (!bp->grp_info)
4720 return -ENOMEM;
4721 }
4722 for (i = 0; i < bp->cp_nr_rings; i++) {
4723 if (irq_re_init)
4724 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4725 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4726 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4727 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4728 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4729 }
4730 return 0;
4731 }
4732
4733 static void bnxt_free_vnics(struct bnxt *bp)
4734 {
4735 kfree(bp->vnic_info);
4736 bp->vnic_info = NULL;
4737 bp->nr_vnics = 0;
4738 }
4739
4740 static int bnxt_alloc_vnics(struct bnxt *bp)
4741 {
4742 int num_vnics = 1;
4743
4744 #ifdef CONFIG_RFS_ACCEL
4745 if (bp->flags & BNXT_FLAG_RFS) {
4746 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4747 num_vnics++;
4748 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4749 num_vnics += bp->rx_nr_rings;
4750 }
4751 #endif
4752
4753 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4754 num_vnics++;
4755
4756 bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4757 if (!bp->vnic_info)
4758 return -ENOMEM;
4759
4760 bp->nr_vnics = num_vnics;
4761 return 0;
4762 }
4763
4764 static void bnxt_init_vnics(struct bnxt *bp)
4765 {
4766 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4767 int i;
4768
4769 for (i = 0; i < bp->nr_vnics; i++) {
4770 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4771 int j;
4772
4773 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4774 vnic->vnic_id = i;
4775 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4776 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4777
4778 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4779
4780 if (bp->vnic_info[i].rss_hash_key) {
4781 if (i == BNXT_VNIC_DEFAULT) {
4782 u8 *key = (void *)vnic->rss_hash_key;
4783 int k;
4784
4785 if (!bp->rss_hash_key_valid &&
4786 !bp->rss_hash_key_updated) {
4787 get_random_bytes(bp->rss_hash_key,
4788 HW_HASH_KEY_SIZE);
4789 bp->rss_hash_key_updated = true;
4790 }
4791
4792 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4793 HW_HASH_KEY_SIZE);
4794
4795 if (!bp->rss_hash_key_updated)
4796 continue;
4797
4798 bp->rss_hash_key_updated = false;
4799 bp->rss_hash_key_valid = true;
4800
4801 bp->toeplitz_prefix = 0;
4802 for (k = 0; k < 8; k++) {
4803 bp->toeplitz_prefix <<= 8;
4804 bp->toeplitz_prefix |= key[k];
4805 }
4806 } else {
4807 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4808 HW_HASH_KEY_SIZE);
4809 }
4810 }
4811 }
4812 }
4813
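/* Number of ring pages needed for a given ring size, rounded up to a
 * power of two with one extra unit of slack: a quotient of 3 becomes 4
 * pages, a quotient of 4 becomes 8.
 */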
4814 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4815 {
4816 int pages;
4817
4818 pages = ring_size / desc_per_pg;
4819
4820 if (!pages)
4821 return 1;
4822
4823 pages++;
4824
4825 while (pages & (pages - 1))
4826 pages++;
4827
4828 return pages;
4829 }
4830
4831 void bnxt_set_tpa_flags(struct bnxt *bp)
4832 {
4833 bp->flags &= ~BNXT_FLAG_TPA;
4834 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4835 return;
4836 if (bp->dev->features & NETIF_F_LRO)
4837 bp->flags |= BNXT_FLAG_LRO;
4838 else if (bp->dev->features & NETIF_F_GRO_HW)
4839 bp->flags |= BNXT_FLAG_GRO;
4840 }
4841
4842 static void bnxt_init_ring_params(struct bnxt *bp)
4843 {
4844 unsigned int rx_size;
4845
4846 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4847 /* Try to fit 4 chunks into a 4k page */
4848 rx_size = SZ_1K -
4849 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4850 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4851 }
4852
4853 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4854 * be set on entry.
4855 */
4856 void bnxt_set_ring_params(struct bnxt *bp)
4857 {
4858 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4859 u32 agg_factor = 0, agg_ring_size = 0;
4860
4861 /* 8 for CRC and VLAN */
4862 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4863
4864 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4865 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4866
4867 ring_size = bp->rx_ring_size;
4868 bp->rx_agg_ring_size = 0;
4869 bp->rx_agg_nr_pages = 0;
4870
4871 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4872 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4873
4874 bp->flags &= ~BNXT_FLAG_JUMBO;
4875 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4876 u32 jumbo_factor;
4877
4878 bp->flags |= BNXT_FLAG_JUMBO;
4879 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4880 if (jumbo_factor > agg_factor)
4881 agg_factor = jumbo_factor;
4882 }
4883 if (agg_factor) {
4884 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4885 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4886 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4887 bp->rx_ring_size, ring_size);
4888 bp->rx_ring_size = ring_size;
4889 }
4890 agg_ring_size = ring_size * agg_factor;
4891
4892 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4893 RX_DESC_CNT);
4894 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4895 u32 tmp = agg_ring_size;
4896
4897 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4898 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4899 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4900 tmp, agg_ring_size);
4901 }
4902 bp->rx_agg_ring_size = agg_ring_size;
4903 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4904
4905 if (BNXT_RX_PAGE_MODE(bp)) {
4906 rx_space = PAGE_SIZE;
4907 rx_size = PAGE_SIZE -
4908 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4909 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4910 } else {
4911 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4912 bp->rx_copybreak,
4913 bp->dev->cfg_pending->hds_thresh);
4914 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4915 rx_space = rx_size + NET_SKB_PAD +
4916 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4917 }
4918 }
4919
4920 bp->rx_buf_use_size = rx_size;
4921 bp->rx_buf_size = rx_space;
4922
4923 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4924 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4925
4926 ring_size = bp->tx_ring_size;
4927 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4928 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4929
4930 max_rx_cmpl = bp->rx_ring_size;
4931 /* MAX TPA needs to be added because TPA_START completions are
4932 * immediately recycled, so the TPA completions are not bound by
4933 * the RX ring size.
4934 */
4935 if (bp->flags & BNXT_FLAG_TPA)
4936 max_rx_cmpl += bp->max_tpa;
4937 /* RX and TPA completions are 32-byte, all others are 16-byte */
4938 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4939 bp->cp_ring_size = ring_size;
4940
4941 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4942 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4943 bp->cp_nr_pages = MAX_CP_PAGES;
4944 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4945 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4946 ring_size, bp->cp_ring_size);
4947 }
4948 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4949 bp->cp_ring_mask = bp->cp_bit - 1;
4950 }
4951
4952 /* Changing allocation mode of RX rings.
4953 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4954 */
4955 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4956 {
4957 struct net_device *dev = bp->dev;
4958
4959 if (page_mode) {
4960 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4961 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4962
4963 if (bp->xdp_prog->aux->xdp_has_frags)
4964 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4965 else
4966 dev->max_mtu =
4967 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4968 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4969 bp->flags |= BNXT_FLAG_JUMBO;
4970 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4971 } else {
4972 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4973 bp->rx_skb_func = bnxt_rx_page_skb;
4974 }
4975 bp->rx_dir = DMA_BIDIRECTIONAL;
4976 } else {
4977 dev->max_mtu = bp->max_mtu;
4978 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4979 bp->rx_dir = DMA_FROM_DEVICE;
4980 bp->rx_skb_func = bnxt_rx_skb;
4981 }
4982 }
4983
4984 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4985 {
4986 __bnxt_set_rx_skb_mode(bp, page_mode);
4987
4988 if (!page_mode) {
4989 int rx, tx;
4990
4991 bnxt_get_max_rings(bp, &rx, &tx, true);
4992 if (rx > 1) {
4993 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4994 bp->dev->hw_features |= NETIF_F_LRO;
4995 }
4996 }
4997
4998 /* Update LRO and GRO_HW availability */
4999 netdev_update_features(bp->dev);
5000 }
5001
5002 static void bnxt_free_vnic_attributes(struct bnxt *bp)
5003 {
5004 int i;
5005 struct bnxt_vnic_info *vnic;
5006 struct pci_dev *pdev = bp->pdev;
5007
5008 if (!bp->vnic_info)
5009 return;
5010
5011 for (i = 0; i < bp->nr_vnics; i++) {
5012 vnic = &bp->vnic_info[i];
5013
5014 kfree(vnic->fw_grp_ids);
5015 vnic->fw_grp_ids = NULL;
5016
5017 kfree(vnic->uc_list);
5018 vnic->uc_list = NULL;
5019
5020 if (vnic->mc_list) {
5021 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
5022 vnic->mc_list, vnic->mc_list_mapping);
5023 vnic->mc_list = NULL;
5024 }
5025
5026 if (vnic->rss_table) {
5027 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
5028 vnic->rss_table,
5029 vnic->rss_table_dma_addr);
5030 vnic->rss_table = NULL;
5031 }
5032
5033 vnic->rss_hash_key = NULL;
5034 vnic->flags = 0;
5035 }
5036 }
5037
5038 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
5039 {
5040 int i, rc = 0, size;
5041 struct bnxt_vnic_info *vnic;
5042 struct pci_dev *pdev = bp->pdev;
5043 int max_rings;
5044
5045 for (i = 0; i < bp->nr_vnics; i++) {
5046 vnic = &bp->vnic_info[i];
5047
5048 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
5049 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
5050
5051 if (mem_size > 0) {
5052 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
5053 if (!vnic->uc_list) {
5054 rc = -ENOMEM;
5055 goto out;
5056 }
5057 }
5058 }
5059
5060 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
5061 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
5062 vnic->mc_list =
5063 dma_alloc_coherent(&pdev->dev,
5064 vnic->mc_list_size,
5065 &vnic->mc_list_mapping,
5066 GFP_KERNEL);
5067 if (!vnic->mc_list) {
5068 rc = -ENOMEM;
5069 goto out;
5070 }
5071 }
5072
5073 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5074 goto vnic_skip_grps;
5075
5076 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5077 max_rings = bp->rx_nr_rings;
5078 else
5079 max_rings = 1;
5080
5081 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
5082 if (!vnic->fw_grp_ids) {
5083 rc = -ENOMEM;
5084 goto out;
5085 }
5086 vnic_skip_grps:
5087 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
5088 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
5089 continue;
5090
5091 /* Allocate rss table and hash key */
5092 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5093 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5094 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5095
5096 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5097 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5098 vnic->rss_table_size,
5099 &vnic->rss_table_dma_addr,
5100 GFP_KERNEL);
5101 if (!vnic->rss_table) {
5102 rc = -ENOMEM;
5103 goto out;
5104 }
5105
5106 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5107 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5108 }
5109 return 0;
5110
5111 out:
5112 return rc;
5113 }
5114
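/* Tear down HWRM messaging state: destroy the request DMA pool and mark
 * every request still on the pending list as cancelled so its waiter
 * can bail out.
 */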
5115 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5116 {
5117 struct bnxt_hwrm_wait_token *token;
5118
5119 dma_pool_destroy(bp->hwrm_dma_pool);
5120 bp->hwrm_dma_pool = NULL;
5121
5122 rcu_read_lock();
5123 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5124 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5125 rcu_read_unlock();
5126 }
5127
5128 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5129 {
5130 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5131 BNXT_HWRM_DMA_SIZE,
5132 BNXT_HWRM_DMA_ALIGN, 0);
5133 if (!bp->hwrm_dma_pool)
5134 return -ENOMEM;
5135
5136 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5137
5138 return 0;
5139 }
5140
5141 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5142 {
5143 kfree(stats->hw_masks);
5144 stats->hw_masks = NULL;
5145 kfree(stats->sw_stats);
5146 stats->sw_stats = NULL;
5147 if (stats->hw_stats) {
5148 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5149 stats->hw_stats_map);
5150 stats->hw_stats = NULL;
5151 }
5152 }
5153
5154 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5155 bool alloc_masks)
5156 {
5157 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5158 &stats->hw_stats_map, GFP_KERNEL);
5159 if (!stats->hw_stats)
5160 return -ENOMEM;
5161
5162 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5163 if (!stats->sw_stats)
5164 goto stats_mem_err;
5165
5166 if (alloc_masks) {
5167 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5168 if (!stats->hw_masks)
5169 goto stats_mem_err;
5170 }
5171 return 0;
5172
5173 stats_mem_err:
5174 bnxt_free_stats_mem(bp, stats);
5175 return -ENOMEM;
5176 }
5177
5178 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5179 {
5180 int i;
5181
5182 for (i = 0; i < count; i++)
5183 mask_arr[i] = mask;
5184 }
5185
5186 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5187 {
5188 int i;
5189
5190 for (i = 0; i < count; i++)
5191 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5192 }
5193
5194 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5195 struct bnxt_stats_mem *stats)
5196 {
5197 struct hwrm_func_qstats_ext_output *resp;
5198 struct hwrm_func_qstats_ext_input *req;
5199 __le64 *hw_masks;
5200 int rc;
5201
5202 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5203 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5204 return -EOPNOTSUPP;
5205
5206 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5207 if (rc)
5208 return rc;
5209
5210 req->fid = cpu_to_le16(0xffff);
5211 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5212
5213 resp = hwrm_req_hold(bp, req);
5214 rc = hwrm_req_send(bp, req);
5215 if (!rc) {
5216 hw_masks = &resp->rx_ucast_pkts;
5217 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5218 }
5219 hwrm_req_drop(bp, req);
5220 return rc;
5221 }
5222
5223 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5224 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5225
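/* Learn how wide the hardware counters are so that rollover can be
 * handled in software.  The firmware is queried for counter masks;
 * when the query is unsupported, fixed masks are assumed: 48 bits for
 * ring counters on P5_PLUS chips (full width otherwise) and 40 bits
 * for port counters.
 */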
5226 static void bnxt_init_stats(struct bnxt *bp)
5227 {
5228 struct bnxt_napi *bnapi = bp->bnapi[0];
5229 struct bnxt_cp_ring_info *cpr;
5230 struct bnxt_stats_mem *stats;
5231 __le64 *rx_stats, *tx_stats;
5232 int rc, rx_count, tx_count;
5233 u64 *rx_masks, *tx_masks;
5234 u64 mask;
5235 u8 flags;
5236
5237 cpr = &bnapi->cp_ring;
5238 stats = &cpr->stats;
5239 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5240 if (rc) {
5241 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5242 mask = (1ULL << 48) - 1;
5243 else
5244 mask = -1ULL;
5245 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5246 }
5247 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5248 stats = &bp->port_stats;
5249 rx_stats = stats->hw_stats;
5250 rx_masks = stats->hw_masks;
5251 rx_count = sizeof(struct rx_port_stats) / 8;
5252 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5253 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5254 tx_count = sizeof(struct tx_port_stats) / 8;
5255
5256 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5257 rc = bnxt_hwrm_port_qstats(bp, flags);
5258 if (rc) {
5259 mask = (1ULL << 40) - 1;
5260
5261 bnxt_fill_masks(rx_masks, mask, rx_count);
5262 bnxt_fill_masks(tx_masks, mask, tx_count);
5263 } else {
5264 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5265 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5266 bnxt_hwrm_port_qstats(bp, 0);
5267 }
5268 }
5269 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5270 stats = &bp->rx_port_stats_ext;
5271 rx_stats = stats->hw_stats;
5272 rx_masks = stats->hw_masks;
5273 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5274 stats = &bp->tx_port_stats_ext;
5275 tx_stats = stats->hw_stats;
5276 tx_masks = stats->hw_masks;
5277 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5278
5279 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5280 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5281 if (rc) {
5282 mask = (1ULL << 40) - 1;
5283
5284 bnxt_fill_masks(rx_masks, mask, rx_count);
5285 if (tx_stats)
5286 bnxt_fill_masks(tx_masks, mask, tx_count);
5287 } else {
5288 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5289 if (tx_stats)
5290 bnxt_copy_hw_masks(tx_masks, tx_stats,
5291 tx_count);
5292 bnxt_hwrm_port_qstats_ext(bp, 0);
5293 }
5294 }
5295 }
5296
5297 static void bnxt_free_port_stats(struct bnxt *bp)
5298 {
5299 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5300 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5301
5302 bnxt_free_stats_mem(bp, &bp->port_stats);
5303 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5304 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5305 }
5306
bnxt_free_ring_stats(struct bnxt * bp)5307 static void bnxt_free_ring_stats(struct bnxt *bp)
5308 {
5309 int i;
5310
5311 if (!bp->bnapi)
5312 return;
5313
5314 for (i = 0; i < bp->cp_nr_rings; i++) {
5315 struct bnxt_napi *bnapi = bp->bnapi[i];
5316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5317
5318 bnxt_free_stats_mem(bp, &cpr->stats);
5319
5320 kfree(cpr->sw_stats);
5321 cpr->sw_stats = NULL;
5322 }
5323 }
5324
bnxt_alloc_stats(struct bnxt * bp)5325 static int bnxt_alloc_stats(struct bnxt *bp)
5326 {
5327 u32 size, i;
5328 int rc;
5329
5330 size = bp->hw_ring_stats_size;
5331
5332 for (i = 0; i < bp->cp_nr_rings; i++) {
5333 struct bnxt_napi *bnapi = bp->bnapi[i];
5334 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5335
5336 cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5337 if (!cpr->sw_stats)
5338 return -ENOMEM;
5339
5340 cpr->stats.len = size;
5341 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5342 if (rc)
5343 return rc;
5344
5345 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5346 }
5347
5348 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5349 return 0;
5350
5351 if (bp->port_stats.hw_stats)
5352 goto alloc_ext_stats;
5353
5354 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5355 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5356 if (rc)
5357 return rc;
5358
5359 bp->flags |= BNXT_FLAG_PORT_STATS;
5360
5361 alloc_ext_stats:
5362 /* Display extended statistics only if FW supports it */
5363 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5364 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5365 return 0;
5366
5367 if (bp->rx_port_stats_ext.hw_stats)
5368 goto alloc_tx_ext_stats;
5369
5370 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5371 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5372 /* Extended stats are optional */
5373 if (rc)
5374 return 0;
5375
5376 alloc_tx_ext_stats:
5377 if (bp->tx_port_stats_ext.hw_stats)
5378 return 0;
5379
5380 if (bp->hwrm_spec_code >= 0x10902 ||
5381 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5382 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5383 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5384 /* Extended stats are optional */
5385 if (rc)
5386 return 0;
5387 }
5388 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5389 return 0;
5390 }
5391
bnxt_clear_ring_indices(struct bnxt * bp)5392 static void bnxt_clear_ring_indices(struct bnxt *bp)
5393 {
5394 int i, j;
5395
5396 if (!bp->bnapi)
5397 return;
5398
5399 for (i = 0; i < bp->cp_nr_rings; i++) {
5400 struct bnxt_napi *bnapi = bp->bnapi[i];
5401 struct bnxt_cp_ring_info *cpr;
5402 struct bnxt_rx_ring_info *rxr;
5403 struct bnxt_tx_ring_info *txr;
5404
5405 if (!bnapi)
5406 continue;
5407
5408 cpr = &bnapi->cp_ring;
5409 cpr->cp_raw_cons = 0;
5410
5411 bnxt_for_each_napi_tx(j, bnapi, txr) {
5412 txr->tx_prod = 0;
5413 txr->tx_cons = 0;
5414 txr->tx_hw_cons = 0;
5415 }
5416
5417 rxr = bnapi->rx_ring;
5418 if (rxr) {
5419 rxr->rx_prod = 0;
5420 rxr->rx_agg_prod = 0;
5421 rxr->rx_sw_agg_prod = 0;
5422 rxr->rx_next_cons = 0;
5423 }
5424 bnapi->events = 0;
5425 }
5426 }
5427
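/* Only user-created filters are tracked on the usr_fltr_list: L2
 * filters with an explicit ring destination and ntuple filters that
 * are exempt from aging.
 */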
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
{
	u8 type = fltr->type, flags = fltr->flags;

	INIT_LIST_HEAD(&fltr->list);
	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
		list_add_tail(&fltr->list, &bp->usr_fltr_list);
}

void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
{
	if (!list_empty(&fltr->list))
		list_del_init(&fltr->list);
}

static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
{
	struct bnxt_filter_base *usr_fltr, *tmp;

	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
			continue;
		bnxt_del_one_usr_fltr(bp, usr_fltr);
	}
}

static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
{
	hlist_del(&fltr->hash);
	bnxt_del_one_usr_fltr(bp, fltr);
	if (fltr->flags) {
		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
		bp->ntp_fltr_count--;
	}
	kfree(fltr);
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
	int i;

	netdev_assert_locked_or_invisible(bp->dev);

	/* We hold the netdev instance lock and all our NAPIs have been
	 * disabled, so it is safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
			bnxt_del_l2_filter(bp, fltr->l2_fltr);
			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
				     !list_empty(&fltr->base.list)))
				continue;
			bnxt_del_fltr(bp, &fltr->base);
		}
	}
	if (!all)
		return;

	bitmap_free(bp->ntp_fltr_bmap);
	bp->ntp_fltr_bmap = NULL;
	bp->ntp_fltr_count = 0;
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
}

static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
{
	int i;

	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_l2_filter *fltr;

		head = &bp->l2_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
				     !list_empty(&fltr->base.list)))
				continue;
			bnxt_del_fltr(bp, &fltr->base);
		}
	}
}

static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_all_cp_arrays(bp);
	bnxt_free_ntp_fltrs(bp, false);
	bnxt_free_l2_filters(bp, false);
	if (irq_re_init) {
		bnxt_free_ring_stats(bp);
		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
			bnxt_free_port_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

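/* Allocate (or reallocate) the driver's ring state.  With irq_re_init,
 * the bnapi pointer array and all bnxt_napi structs come from a single
 * allocation, and each TX/RX ring is then linked to its NAPI instance.
 * On P5+ chips the ring memory is flagged as page-table backed
 * (BNXT_RMEM_RING_PTE_FLAG).
 */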
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
					  bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
				struct bnxt_cp_ring_info *cpr =
					&bp->bnapi[i]->cp_ring;

				cpr->cp_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			}
		}

		bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
					   bp->rx_nr_rings);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
				rxr->rx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
				rxr->rx_agg_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			} else {
				rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
			}
			rxr->bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
					   bp->tx_nr_rings);
		if (!bp->tx_ring)
			return -ENOMEM;

		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++) {
			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
			struct bnxt_napi *bnapi2;

			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
				txr->tx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				int k = j + BNXT_RING_TO_TC_OFF(bp, i);

				bnapi2 = bp->bnapi[k];
				txr->txq_index = i - bp->tx_nr_rings_xdp;
				txr->tx_napi_idx =
					BNXT_RING_TO_TC(bp, txr->txq_index);
				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
				bnapi2->tx_int = bnxt_tx_int;
			} else {
				bnapi2 = bp->bnapi[j];
				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
				bnapi2->tx_ring[0] = txr;
				bnapi2->tx_int = bnxt_tx_int_xdp;
				j++;
			}
			txr->bnapi = bnapi2;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
				txr->tx_cpr = &bnapi2->cp_ring;
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;
		bnxt_init_stats(bp);

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	rc = bnxt_alloc_all_cp_arrays(bp);
	if (rc)
		goto alloc_mem_err;

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
						  BNXT_VNIC_MCAST_FLAG |
						  BNXT_VNIC_UCAST_FLAG;
	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}

static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
{
	struct bnxt_napi *bnapi = bp->bnapi[n];
	struct bnxt_cp_ring_info *cpr;

	cpr = &bnapi->cp_ring;
	return cpr->cp_ring_struct.map_idx;
}

static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	if (!bp->irq_tbl)
		return;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		synchronize_irq(bp->irq_tbl[map_idx].vector);
	}
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

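/* Register the driver with the firmware: report OS type and driver
 * version, request forwarding of the async events the driver handles,
 * and, on the PF, forward the VF commands the driver wants to snoop.
 */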
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
			    bool async_only)
{
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	struct hwrm_func_drv_rgtr_output *resp;
	struct hwrm_func_drv_rgtr_input *req;
	u32 flags;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
	req->flags = cpu_to_le32(flags);
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
			    cmd == HWRM_PORT_PHY_QCFG)
				continue;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req->vf_req_fwd[i] = cpu_to_le32(data[i]);

		req->enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		req->flags |= cpu_to_le32(
			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
		u16 event_id = bnxt_async_events_arr[i];

		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
			continue;
		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
		    !bp->ptp_cfg)
			continue;
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
	}
	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}
	for (i = 0; i < 8; i++)
		req->async_event_fwd[i] |= cpu_to_le32(events[i]);

	if (async_only)
		req->enables =
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
		if (resp->flags &
		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
	if (rc)
		return rc;
	return hwrm_req_send(bp, req);
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);

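/* Free the firmware UDP tunnel destination port and clear the cached
 * port and firmware ID.  TPA is reconfigured afterwards because the
 * tunnel TPA bitmap depends on which tunnel ports are configured.
 */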
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input *req;
	int rc;

	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;
	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
	if (rc)
		return rc;

	req->tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
		bp->vxlan_port = 0;
		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
		bp->nge_port = 0;
		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
		bp->vxlan_gpe_port = 0;
		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	default:
		break;
	}

	rc = hwrm_req_send(bp, req);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, true);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_output *resp;
	struct hwrm_tunnel_dst_port_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
	if (rc)
		return rc;

	req->tunnel_type = tunnel_type;
	req->tunnel_dst_port_val = port;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_port = port;
		bp->vxlan_fw_dst_port_id =
			le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_port = port;
		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
		bp->vxlan_gpe_port = port;
		bp->vxlan_gpe_fw_dst_port_id =
			le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	default:
		break;
	}
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, true);

err_out:
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	}
	req->mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_req_send_silent(bp, req);
}

void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
{
	if (!atomic_dec_and_test(&fltr->refcnt))
		return;
	spin_lock_bh(&bp->ntp_fltr_lock);
	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return;
	}
	hlist_del_rcu(&fltr->base.hash);
	bnxt_del_one_usr_fltr(bp, &fltr->base);
	if (fltr->base.flags) {
		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
		bp->ntp_fltr_count--;
	}
	spin_unlock_bh(&bp->ntp_fltr_lock);
	kfree_rcu(fltr, base.rcu);
}

static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
						      struct bnxt_l2_key *key,
						      u32 idx)
{
	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
	struct bnxt_l2_filter *fltr;

	hlist_for_each_entry_rcu(fltr, head, base.hash) {
		struct bnxt_l2_key *l2_key = &fltr->l2_key;

		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
		    l2_key->vlan == key->vlan)
			return fltr;
	}
	return NULL;
}

static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
						    struct bnxt_l2_key *key,
						    u32 idx)
{
	struct bnxt_l2_filter *fltr = NULL;

	rcu_read_lock();
	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
	if (fltr)
		atomic_inc(&fltr->refcnt);
	rcu_read_unlock();
	return fltr;
}

#define BNXT_IPV4_4TUPLE(bp, fkeys)					\
	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))

#define BNXT_IPV6_4TUPLE(bp, fkeys)					\
	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))

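/* The 4-tuple macros above are true when RSS for the flow's protocol
 * hashes on both the IP addresses and the L4 ports; otherwise only the
 * address pair contributes to the hash length computed below.
 */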
static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
{
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (BNXT_IPV4_4TUPLE(bp, fkeys))
			return sizeof(fkeys->addrs.v4addrs) +
			       sizeof(fkeys->ports);

		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
			return sizeof(fkeys->addrs.v4addrs);
	}

	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
		if (BNXT_IPV6_4TUPLE(bp, fkeys))
			return sizeof(fkeys->addrs.v6addrs) +
			       sizeof(fkeys->ports);

		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
			return sizeof(fkeys->addrs.v6addrs);
	}

	return 0;
}

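/* Software Toeplitz hash over the flow's 2- or 4-tuple.  The hash key
 * is processed through a sliding 64-bit window ("prefix"): for every
 * set bit of the input, the current window is XORed into the hash, and
 * a new key byte is shifted in after each input byte.  The result
 * selects the ntuple filter hash bucket for the flow.
 */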
static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
			 const unsigned char *key)
{
	u64 prefix = bp->toeplitz_prefix, hash = 0;
	struct bnxt_ipv4_tuple tuple4;
	struct bnxt_ipv6_tuple tuple6;
	int i, j, len = 0;
	u8 *four_tuple;

	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
	if (!len)
		return 0;

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuple4.v4addrs = fkeys->addrs.v4addrs;
		tuple4.ports = fkeys->ports;
		four_tuple = (unsigned char *)&tuple4;
	} else {
		tuple6.v6addrs = fkeys->addrs.v6addrs;
		tuple6.ports = fkeys->ports;
		four_tuple = (unsigned char *)&tuple6;
	}

	for (i = 0, j = 8; i < len; i++, j++) {
		u8 byte = four_tuple[i];
		int bit;

		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
			if (byte & 0x80)
				hash ^= prefix;
		}
		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
	}

	/* The valid part of the hash is in the upper 32 bits. */
	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
}

#ifdef CONFIG_RFS_ACCEL
static struct bnxt_l2_filter *
bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
{
	struct bnxt_l2_filter *fltr;
	u32 idx;

	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
	      BNXT_L2_FLTR_HASH_MASK;
	fltr = bnxt_lookup_l2_filter(bp, key, idx);
	return fltr;
}
#endif

static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
			       struct bnxt_l2_key *key, u32 idx)
{
	struct hlist_head *head;

	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
	fltr->l2_key.vlan = key->vlan;
	fltr->base.type = BNXT_FLTR_TYPE_L2;
	if (fltr->base.flags) {
		int bit_id;

		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
						 bp->max_fltr, 0);
		if (bit_id < 0)
			return -ENOMEM;
		fltr->base.sw_id = (u16)bit_id;
		bp->ntp_fltr_count++;
	}
	head = &bp->l2_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	bnxt_insert_usr_fltr(bp, &fltr->base);
	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
	atomic_set(&fltr->refcnt, 1);
	return 0;
}

static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
						   struct bnxt_l2_key *key,
						   gfp_t gfp)
{
	struct bnxt_l2_filter *fltr;
	u32 idx;
	int rc;

	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
	      BNXT_L2_FLTR_HASH_MASK;
	fltr = bnxt_lookup_l2_filter(bp, key, idx);
	if (fltr)
		return fltr;

	fltr = kzalloc_obj(*fltr, gfp);
	if (!fltr)
		return ERR_PTR(-ENOMEM);
	spin_lock_bh(&bp->ntp_fltr_lock);
	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
	spin_unlock_bh(&bp->ntp_fltr_lock);
	if (rc) {
		bnxt_del_l2_filter(bp, fltr);
		fltr = ERR_PTR(rc);
	}
	return fltr;
}

struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
						struct bnxt_l2_key *key,
						u16 flags)
{
	struct bnxt_l2_filter *fltr;
	u32 idx;
	int rc;

	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
	      BNXT_L2_FLTR_HASH_MASK;
	spin_lock_bh(&bp->ntp_fltr_lock);
	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
	if (fltr) {
		fltr = ERR_PTR(-EEXIST);
		goto l2_filter_exit;
	}
	fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
	if (!fltr) {
		fltr = ERR_PTR(-ENOMEM);
		goto l2_filter_exit;
	}
	fltr->base.flags = flags;
	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
	if (rc) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		bnxt_del_l2_filter(bp, fltr);
		return ERR_PTR(rc);
	}

l2_filter_exit:
	spin_unlock_bh(&bp->ntp_fltr_lock);
	return fltr;
}

static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
	struct bnxt_vf_info *vf = &pf->vf[vf_idx];

	return vf->fw_fid;
#else
	return INVALID_HW_RING_ID;
#endif
}

int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_free_input *req;
	u16 target_id = 0xffff;
	int rc;

	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
		struct bnxt_pf_info *pf = &bp->pf;

		if (fltr->base.vf_idx >= pf->active_vfs)
			return -EINVAL;

		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
		if (target_id == INVALID_HW_RING_ID)
			return -EINVAL;
	}

	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;

	req->target_id = cpu_to_le16(target_id);
	req->l2_filter_id = fltr->base.filter_id;
	return hwrm_req_send(bp, req);
}

int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	struct hwrm_cfa_l2_filter_alloc_input *req;
	u16 target_id = 0xffff;
	int rc;

	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
		struct bnxt_pf_info *pf = &bp->pf;

		if (fltr->base.vf_idx >= pf->active_vfs)
			return -EINVAL;

		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
	}
	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
	if (rc)
		return rc;

	req->target_id = cpu_to_le16(target_id);
	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req->flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
	req->enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
	eth_broadcast_addr(req->l2_addr_mask);

	if (fltr->l2_key.vlan) {
		req->enables |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
		req->num_vlans = 1;
		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
		req->l2_ivlan_mask = cpu_to_le16(0xfff);
	}

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		fltr->base.filter_id = resp->l2_filter_id;
		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
				     struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input *req;
	int rc;

	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
	if (rc)
		return rc;

	req->ntuple_filter_id = fltr->base.filter_id;
	return hwrm_req_send(bp, req);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE

void bnxt_fill_ipv6_mask(__be32 mask[4])
{
	int i;

	for (i = 0; i < 4; i++)
		mask[i] = cpu_to_be32(~0);
}

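/* Pick the destination for an aRFS/ntuple filter.  A filter bound to
 * an RSS context is steered to that context's VNIC; otherwise, chips
 * with a dedicated ntuple VNIC use an RSS ring table index, and older
 * chips steer directly to the RX ring ID.
 */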
static void
bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
			  struct bnxt_ntuple_filter *fltr)
{
	u16 rxq = fltr->base.rxq;

	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		struct ethtool_rxfh_context *ctx;
		struct bnxt_rss_ctx *rss_ctx;
		struct bnxt_vnic_info *vnic;

		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
			      fltr->base.fw_vnic_id);
		if (ctx) {
			rss_ctx = ethtool_rxfh_context_priv(ctx);
			vnic = &rss_ctx->vnic;

			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
		}
		return;
	}
	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
		struct bnxt_vnic_info *vnic;
		u32 enables;

		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
		req->enables |= cpu_to_le32(enables);
		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
	} else {
		u32 flags;

		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
		req->flags |= cpu_to_le32(flags);
		req->dst_id = cpu_to_le16(rxq);
	}
}

int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
				      struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
	struct hwrm_cfa_ntuple_filter_alloc_input *req;
	struct bnxt_flow_masks *masks = &fltr->fmasks;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_vnic_info *vnic;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
	if (rc)
		return rc;

	l2_fltr = fltr->l2_fltr;
	req->l2_filter_id = l2_fltr->base.filter_id;

	if (fltr->base.flags & BNXT_ACT_DROP) {
		req->flags =
			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
	} else {
		vnic = &bp->vnic_info[fltr->base.rxq + 1];
		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
	}
	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req->ethertype = htons(ETH_P_IP);
	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		req->ethertype = htons(ETH_P_IPV6);
		req->ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
	} else {
		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req->src_port = keys->ports.src;
	req->src_port_mask = masks->ports.src;
	req->dst_port = keys->ports.dst;
	req->dst_port_mask = masks->ports.dst;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		fltr->base.filter_id = resp->ntuple_filter_id;
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     const u8 *mac_addr)
{
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	int rc;

	ether_addr_copy(key.dst_mac_addr, mac_addr);
	key.vlan = 0;
	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
	return rc;
}

static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */

	/* Any associated ntuple filters will also be cleared by firmware. */
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];

			bnxt_hwrm_l2_filter_free(bp, fltr);
			bnxt_del_l2_filter(bp, fltr);
		}
		vnic->uc_filter_count = 0;
	}
}

#define BNXT_DFLT_TUNL_TPA_BMAP				\
	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)

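/* Extend the default tunnel TPA bitmap (GRE and plain IPv4/IPv6) with
 * whichever UDP tunnel ports are currently configured, if the firmware
 * supports per-tunnel-type TPA.
 */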
static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
					   struct hwrm_vnic_tpa_cfg_input *req)
{
	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;

	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
		return;

	if (bp->vxlan_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
	if (bp->vxlan_gpe_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
	if (bp->nge_port)
		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;

	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}

int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			   u32 tpa_flags)
{
	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
	struct hwrm_vnic_tpa_cfg_input *req;
	int rc;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
	if (rc)
		return rc;

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req->flags = cpu_to_le32(flags);

		req->enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of aggregation segments is in log2 units,
		 * and the first packet is not counted in these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			segs = MAX_TPA_SEGS_P5;
			max_aggs = bp->max_tpa;
		} else {
			segs = ilog2(nsegs);
		}
		req->max_agg_segs = cpu_to_le16(segs);
		req->max_aggs = cpu_to_le16(max_aggs);

		req->min_agg_len = cpu_to_le32(512);
		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
	}
	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_req_send(bp, req);
}

static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct bnxt_ring_grp_info *grp_info;

	grp_info = &bp->grp_info[ring->grp_idx];
	return grp_info->cp_fw_ring_id;
}

static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
	else
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
	else
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
}

static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
	int entries;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
	else
		entries = HW_HASH_INDEX_SIZE;

	bp->rss_indir_tbl_entries = entries;
	bp->rss_indir_tbl =
		kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
	if (!bp->rss_indir_tbl)
		return -ENOMEM;

	return 0;
}

void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
				 struct ethtool_rxfh_context *rss_ctx)
{
	u16 max_rings, max_entries, pad, i;
	u32 *rss_indir_tbl;

	if (!bp->rx_nr_rings)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		max_rings = bp->rx_nr_rings - 1;
	else
		max_rings = bp->rx_nr_rings;

	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
	if (rss_ctx)
		rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
	else
		rss_indir_tbl = &bp->rss_indir_tbl[0];

	for (i = 0; i < max_entries; i++)
		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);

	pad = bp->rss_indir_tbl_entries - max_entries;
	if (pad)
		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}

static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
	u32 i, tbl_size, max_ring = 0;

	if (!bp->rss_indir_tbl)
		return 0;

	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
	for (i = 0; i < tbl_size; i++)
		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
	return max_ring;
}

int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		if (!rx_rings)
			return 0;
		if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
			return BNXT_RSS_TABLE_MAX_TBL_P5;

		return bnxt_calc_nr_ring_pages(rx_rings - 1,
					       BNXT_RSS_TABLE_ENTRIES_P5);
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 2;
	return 1;
}

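/* Legacy (pre-P5) chips: the RSS table holds ring group IDs.  A VNIC
 * without RSS points every entry at its first ring group.
 */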
static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
	u16 i, j;

	/* Fill the RSS indirection table with ring group ids */
	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (!no_rss)
			j = bp->rss_indir_tbl[i];
		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
	}
}

static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
				    struct bnxt_vnic_info *vnic)
{
	__le16 *ring_tbl = vnic->rss_table;
	struct bnxt_rx_ring_info *rxr;
	u16 tbl_size, i;

	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);

	for (i = 0; i < tbl_size; i++) {
		u16 ring_id, j;

		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
			j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
		else
			j = bp->rss_indir_tbl[i];
		rxr = &bp->rx_ring[j];

		ring_id = rxr->rx_ring_struct.fw_ring_id;
		*ring_tbl++ = cpu_to_le16(ring_id);
		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
		*ring_tbl++ = cpu_to_le16(ring_id);
	}
}

static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
			 struct bnxt_vnic_info *vnic)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
		if (bp->flags & BNXT_FLAG_CHIP_P7)
			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
	} else {
		bnxt_fill_hw_rss_tbl(bp, vnic);
	}

	if (bp->rss_hash_delta) {
		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
		if (bp->rss_hash_cfg & bp->rss_hash_delta)
			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
		else
			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
	} else {
		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
	}
	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
				  bool set_rss)
{
	struct hwrm_vnic_rss_cfg_input *req;
	int rc;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
	if (rc)
		return rc;

	if (set_rss)
		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_req_send(bp, req);
}

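/* P5+ chips: the RSS table holds (RX ring ID, completion ring ID)
 * pairs, and each RSS context covers one chunk of the table, so the
 * table is programmed one context at a time.
 */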
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic, bool set_rss)
{
	struct hwrm_vnic_rss_cfg_input *req;
	dma_addr_t ring_tbl_map;
	u32 i, nr_ctxs;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss)
		return hwrm_req_send(bp, req);

	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
	ring_tbl_map = vnic->rss_table_dma_addr;
	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);

	hwrm_req_hold(bp, req);
	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
		req->ring_table_pair_index = i;
		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		rc = hwrm_req_send(bp, req);
		if (rc)
			goto exit;
	}

exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	struct hwrm_vnic_rss_qcfg_output *resp;
	struct hwrm_vnic_rss_qcfg_input *req;

	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
		return;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	/* All contexts are configured with the same hash_type; context 0 always exists. */
	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	resp = hwrm_req_hold(bp, req);
	if (!hwrm_req_send(bp, req)) {
		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
		bp->rss_hash_delta = 0;
	}
	hwrm_req_drop(bp, req);
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
	struct hwrm_vnic_plcmodes_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
	req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);

	if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
		req->enables |=
			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
		req->hds_threshold = cpu_to_le16(hds_thresh);
	}
	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_req_send(bp, req);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;

	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
		return;

	req->rss_cos_lb_ctx_id =
		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_req_send(bp, req);
	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	hwrm_req_drop(bp, req);

	return rc;
}

static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}

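/* Configure a VNIC in firmware.  On P5+ chips only the default RX and
 * completion rings plus the MRU are programmed; the default ring
 * group and the RSS/COS/LB rules apply to legacy chips only.
 */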
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	struct hwrm_vnic_cfg_input *req;
	unsigned int ring = 0, grp_idx;
	u16 def_vlan = 0;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];

		req->default_rx_ring_id =
			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
		req->default_cmpl_ring_id =
			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
		req->enables =
			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
		goto vnic_mru;
	}
	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS is supported for now; COS & LB are TBD */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					    VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					    VNIC_CFG_REQ_ENABLES_MRU);
		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req->rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req->cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic->vnic_id - 1;
	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
	req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
	vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
	req->mru = cpu_to_le16(vnic->mru);

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (vnic->vnic_id == BNXT_VNIC_DEFAULT &&
	    bnxt_ulp_registered(bp->edev[BNXT_AUXDEV_RDMA]))
		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));

	return hwrm_req_send(bp, req);
}

static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
				    struct bnxt_vnic_info *vnic)
{
	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input *req;

		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
			return;

		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);

		hwrm_req_send(bp, req);
		vnic->fw_vnic_id = INVALID_HW_RING_ID;
	}
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			 unsigned int start_rx_ring_idx,
			 unsigned int nr_rings)
{
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_output *resp;
	struct hwrm_vnic_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		goto vnic_no_ring_grps;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
	}

vnic_no_ring_grps:
	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	hwrm_req_drop(bp, req);
	return rc;
}

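/* Query and cache the VNIC capability flags.  This also sizes the
 * per-ring stats block: firmware that reports TPA v2 support uses the
 * larger P5 or P7 ring stats layout.
 */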
bnxt_hwrm_vnic_qcaps(struct bnxt * bp)7060 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
7061 {
7062 struct hwrm_vnic_qcaps_output *resp;
7063 struct hwrm_vnic_qcaps_input *req;
7064 int rc;
7065
7066 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
7067 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
7068 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
7069 if (bp->hwrm_spec_code < 0x10600)
7070 return 0;
7071
7072 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
7073 if (rc)
7074 return rc;
7075
7076 resp = hwrm_req_hold(bp, req);
7077 rc = hwrm_req_send(bp, req);
7078 if (!rc) {
7079 u32 flags = le32_to_cpu(resp->flags);
7080
7081 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
7082 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
7083 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
7084 if (flags &
7085 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
7086 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
7087
7088 /* Older P5 fw before EXT_HW_STATS support did not set
7089 * VLAN_STRIP_CAP properly.
7090 */
7091 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7092 (BNXT_CHIP_P5(bp) &&
7093 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7094 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7095 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7096 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7097 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7098 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7099 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7100 if (bp->max_tpa_v2) {
7101 if (BNXT_CHIP_P5(bp))
7102 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7103 else
7104 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7105 }
7106 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7107 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7108 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7109 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7110 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7111 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7112 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7113 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7114 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7115 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7116 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7117 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7118 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7119 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7120 }
7121 hwrm_req_drop(bp, req);
7122 return rc;
7123 }
7124
7125 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7126 {
7127 struct hwrm_ring_grp_alloc_output *resp;
7128 struct hwrm_ring_grp_alloc_input *req;
7129 int rc;
7130 u16 i;
7131
7132 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7133 return 0;
7134
7135 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7136 if (rc)
7137 return rc;
7138
7139 resp = hwrm_req_hold(bp, req);
7140 for (i = 0; i < bp->rx_nr_rings; i++) {
7141 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7142
7143 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7144 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7145 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7146 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7147
7148 rc = hwrm_req_send(bp, req);
7149
7150 if (rc)
7151 break;
7152
7153 bp->grp_info[grp_idx].fw_grp_id =
7154 le32_to_cpu(resp->ring_group_id);
7155 }
7156 hwrm_req_drop(bp, req);
7157 return rc;
7158 }
7159
7160 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7161 {
7162 struct hwrm_ring_grp_free_input *req;
7163 u16 i;
7164
7165 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7166 return;
7167
7168 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7169 return;
7170
7171 hwrm_req_hold(bp, req);
7172 for (i = 0; i < bp->cp_nr_rings; i++) {
7173 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7174 continue;
7175 req->ring_group_id =
7176 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7177
7178 hwrm_req_send(bp, req);
7179 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7180 }
7181 hwrm_req_drop(bp, req);
7182 }
7183
7184 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7185 struct hwrm_ring_alloc_input *req,
7186 struct bnxt_rx_ring_info *rxr,
7187 struct bnxt_ring_struct *ring)
7188 {
7189 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7190 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7191 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7192
7193 if (ring_type == HWRM_RING_ALLOC_AGG) {
7194 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7195 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7196 req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7197 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7198 } else {
7199 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7200 if (NET_IP_ALIGN == 2)
7201 req->flags =
7202 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7203 }
7204 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7205 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7206 req->enables |= cpu_to_le32(enables);
7207 }
7208
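/* Build and send a HWRM_RING_ALLOC request for the given ring type and
 * record the firmware-assigned ring ID in ring->fw_ring_id on success.
 */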
7209 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7210 struct bnxt_rx_ring_info *rxr,
7211 struct bnxt_ring_struct *ring,
7212 u32 ring_type, u32 map_index)
7213 {
7214 struct hwrm_ring_alloc_output *resp;
7215 struct hwrm_ring_alloc_input *req;
7216 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7217 struct bnxt_ring_grp_info *grp_info;
7218 int rc, err = 0;
7219 u16 ring_id;
7220
7221 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7222 if (rc)
7223 goto exit;
7224
7225 req->enables = 0;
7226 if (rmem->nr_pages > 1) {
7227 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7228 /* Page size is in log2 units */
7229 req->page_size = BNXT_PAGE_SHIFT;
7230 req->page_tbl_depth = 1;
7231 } else {
7232 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7233 }
7234 req->fbo = 0;
7235 /* Association of ring index with doorbell index and MSIX number */
7236 req->logical_id = cpu_to_le16(map_index);
7237
7238 switch (ring_type) {
7239 case HWRM_RING_ALLOC_TX: {
7240 struct bnxt_tx_ring_info *txr;
7241 u16 flags = 0;
7242
7243 txr = container_of(ring, struct bnxt_tx_ring_info,
7244 tx_ring_struct);
7245 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7246 /* Association of transmit ring with completion ring */
7247 grp_info = &bp->grp_info[ring->grp_idx];
7248 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7249 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7250 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7251 req->queue_id = cpu_to_le16(ring->queue_id);
7252 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7253 req->cmpl_coal_cnt =
7254 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7255 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7256 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7257 req->flags = cpu_to_le16(flags);
7258 break;
7259 }
7260 case HWRM_RING_ALLOC_RX:
7261 case HWRM_RING_ALLOC_AGG:
7262 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7263 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7264 cpu_to_le32(bp->rx_ring_mask + 1) :
7265 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7266 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7267 bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7268 rxr, ring);
7269 break;
7270 case HWRM_RING_ALLOC_CMPL:
7271 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7272 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7273 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7274 /* Association of cp ring with nq */
7275 grp_info = &bp->grp_info[map_index];
7276 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7277 req->cq_handle = cpu_to_le64(ring->handle);
7278 req->enables |= cpu_to_le32(
7279 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7280 } else {
7281 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7282 }
7283 break;
7284 case HWRM_RING_ALLOC_NQ:
7285 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7286 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7287 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7288 break;
7289 default:
7290 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7291 ring_type);
7292 return -EINVAL;
7293 }
7294
7295 resp = hwrm_req_hold(bp, req);
7296 rc = hwrm_req_send(bp, req);
7297 err = le16_to_cpu(resp->error_code);
7298 ring_id = le16_to_cpu(resp->ring_id);
7299 hwrm_req_drop(bp, req);
7300
7301 exit:
7302 if (rc || err) {
7303 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7304 ring_type, rc, err);
7305 return -EIO;
7306 }
7307 ring->fw_ring_id = ring_id;
7308 return rc;
7309 }
7310
7311 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7312 {
7313 int rc;
7314
7315 if (BNXT_PF(bp)) {
7316 struct hwrm_func_cfg_input *req;
7317
7318 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7319 if (rc)
7320 return rc;
7321
7322 req->fid = cpu_to_le16(0xffff);
7323 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7324 req->async_event_cr = cpu_to_le16(idx);
7325 return hwrm_req_send(bp, req);
7326 } else {
7327 struct hwrm_func_vf_cfg_input *req;
7328
7329 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7330 if (rc)
7331 return rc;
7332
7333 req->enables =
7334 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7335 req->async_event_cr = cpu_to_le16(idx);
7336 return hwrm_req_send(bp, req);
7337 }
7338 }
7339
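/* Set the doorbell ring mask (and the epoch mask/shift on P7 chips)
 * for the given ring type.
 */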
7340 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7341 u32 ring_type)
7342 {
7343 switch (ring_type) {
7344 case HWRM_RING_ALLOC_TX:
7345 db->db_ring_mask = bp->tx_ring_mask;
7346 break;
7347 case HWRM_RING_ALLOC_RX:
7348 db->db_ring_mask = bp->rx_ring_mask;
7349 break;
7350 case HWRM_RING_ALLOC_AGG:
7351 db->db_ring_mask = bp->rx_agg_ring_mask;
7352 break;
7353 case HWRM_RING_ALLOC_CMPL:
7354 case HWRM_RING_ALLOC_NQ:
7355 db->db_ring_mask = bp->cp_ring_mask;
7356 break;
7357 }
7358 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7359 db->db_epoch_mask = db->db_ring_mask + 1;
7360 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7361 }
7362 }
7363
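/* Initialize the doorbell for a ring: 64-bit doorbells keyed by the
 * firmware ring ID on P5+ chips, legacy 32-bit doorbells at a fixed
 * per-ring offset (map_idx * 0x80) otherwise.
 */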
7364 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7365 u32 map_idx, u32 xid)
7366 {
7367 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7368 switch (ring_type) {
7369 case HWRM_RING_ALLOC_TX:
7370 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7371 break;
7372 case HWRM_RING_ALLOC_RX:
7373 case HWRM_RING_ALLOC_AGG:
7374 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7375 break;
7376 case HWRM_RING_ALLOC_CMPL:
7377 db->db_key64 = DBR_PATH_L2;
7378 break;
7379 case HWRM_RING_ALLOC_NQ:
7380 db->db_key64 = DBR_PATH_L2;
7381 break;
7382 }
7383 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7384
7385 if (bp->flags & BNXT_FLAG_CHIP_P7)
7386 db->db_key64 |= DBR_VALID;
7387
7388 db->doorbell = bp->bar1 + bp->db_offset;
7389 } else {
7390 db->doorbell = bp->bar1 + map_idx * 0x80;
7391 switch (ring_type) {
7392 case HWRM_RING_ALLOC_TX:
7393 db->db_key32 = DB_KEY_TX;
7394 break;
7395 case HWRM_RING_ALLOC_RX:
7396 case HWRM_RING_ALLOC_AGG:
7397 db->db_key32 = DB_KEY_RX;
7398 break;
7399 case HWRM_RING_ALLOC_CMPL:
7400 db->db_key32 = DB_KEY_CP;
7401 break;
7402 }
7403 }
7404 bnxt_set_db_mask(bp, db, ring_type);
7405 }
7406
7407 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7408 struct bnxt_rx_ring_info *rxr)
7409 {
7410 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7411 struct bnxt_napi *bnapi = rxr->bnapi;
7412 u32 type = HWRM_RING_ALLOC_RX;
7413 u32 map_idx = bnapi->index;
7414 int rc;
7415
7416 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7417 if (rc)
7418 return rc;
7419
7420 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7421 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7422
7423 return 0;
7424 }
7425
7426 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7427 struct bnxt_rx_ring_info *rxr)
7428 {
7429 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7430 u32 type = HWRM_RING_ALLOC_AGG;
7431 u32 grp_idx = ring->grp_idx;
7432 u32 map_idx;
7433 int rc;
7434
7435 map_idx = grp_idx + bp->rx_nr_rings;
7436 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7437 if (rc)
7438 return rc;
7439
7440 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7441 ring->fw_ring_id);
7442 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7443 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7444 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7445
7446 return 0;
7447 }
7448
7449 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7450 struct bnxt_cp_ring_info *cpr)
7451 {
7452 const u32 type = HWRM_RING_ALLOC_CMPL;
7453 struct bnxt_napi *bnapi = cpr->bnapi;
7454 struct bnxt_ring_struct *ring;
7455 u32 map_idx = bnapi->index;
7456 int rc;
7457
7458 ring = &cpr->cp_ring_struct;
7459 ring->handle = BNXT_SET_NQ_HDL(cpr);
7460 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7461 if (rc)
7462 return rc;
7463 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7464 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7465 return 0;
7466 }
7467
7468 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7469 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7470 {
7471 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7472 const u32 type = HWRM_RING_ALLOC_TX;
7473 int rc;
7474
7475 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7476 if (rc)
7477 return rc;
7478 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7479 return 0;
7480 }
7481
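/* Allocate all firmware rings: NQ/CMPL rings first (setting the async
 * event completion ring on the first one), then TX, RX, and aggregation
 * rings.
 */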
7482 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7483 {
7484 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7485 int i, rc = 0;
7486 u32 type;
7487
7488 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7489 type = HWRM_RING_ALLOC_NQ;
7490 else
7491 type = HWRM_RING_ALLOC_CMPL;
7492 for (i = 0; i < bp->cp_nr_rings; i++) {
7493 struct bnxt_napi *bnapi = bp->bnapi[i];
7494 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7495 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7496 u32 map_idx = ring->map_idx;
7497 unsigned int vector;
7498
7499 vector = bp->irq_tbl[map_idx].vector;
7500 disable_irq_nosync(vector);
7501 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7502 if (rc) {
7503 enable_irq(vector);
7504 goto err_out;
7505 }
7506 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7507 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7508 enable_irq(vector);
7509 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7510
7511 if (!i) {
7512 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7513 if (rc)
7514 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7515 }
7516 }
7517
7518 for (i = 0; i < bp->tx_nr_rings; i++) {
7519 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7520
7521 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7522 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7523 if (rc)
7524 goto err_out;
7525 }
7526 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7527 if (rc)
7528 goto err_out;
7529 }
7530
7531 for (i = 0; i < bp->rx_nr_rings; i++) {
7532 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7533
7534 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7535 if (rc)
7536 goto err_out;
7537 /* If we have agg rings, post agg buffers first. */
7538 if (!agg_rings)
7539 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7540 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7541 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7542 if (rc)
7543 goto err_out;
7544 }
7545 }
7546
7547 if (agg_rings) {
7548 for (i = 0; i < bp->rx_nr_rings; i++) {
7549 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7550 if (rc)
7551 goto err_out;
7552 }
7553 }
7554 err_out:
7555 return rc;
7556 }
7557
7558 static void bnxt_cancel_dim(struct bnxt *bp)
7559 {
7560 int i;
7561
7562 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7563 * if NAPI is enabled.
7564 */
7565 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7566 return;
7567
7568 /* Make sure NAPI sees that the VNIC is disabled */
7569 synchronize_net();
7570 for (i = 0; i < bp->rx_nr_rings; i++) {
7571 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7572 struct bnxt_napi *bnapi = rxr->bnapi;
7573
7574 cancel_work_sync(&bnapi->cp_ring.dim.work);
7575 }
7576 }
7577
7578 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7579 struct bnxt_ring_struct *ring,
7580 u32 ring_type, int cmpl_ring_id)
7581 {
7582 struct hwrm_ring_free_output *resp;
7583 struct hwrm_ring_free_input *req;
7584 u16 error_code = 0;
7585 int rc;
7586
7587 if (BNXT_NO_FW_ACCESS(bp))
7588 return 0;
7589
7590 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7591 if (rc)
7592 goto exit;
7593
7594 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7595 req->ring_type = ring_type;
7596 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7597
7598 resp = hwrm_req_hold(bp, req);
7599 rc = hwrm_req_send(bp, req);
7600 error_code = le16_to_cpu(resp->error_code);
7601 hwrm_req_drop(bp, req);
7602 exit:
7603 if (rc || error_code) {
7604 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7605 ring_type, rc, error_code);
7606 return -EIO;
7607 }
7608 return 0;
7609 }
7610
7611 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7612 struct bnxt_tx_ring_info *txr,
7613 bool close_path)
7614 {
7615 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7616 u32 cmpl_ring_id;
7617
7618 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7619 return;
7620
7621 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7622 INVALID_HW_RING_ID;
7623 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7624 cmpl_ring_id);
7625 ring->fw_ring_id = INVALID_HW_RING_ID;
7626 }
7627
7628 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7629 struct bnxt_rx_ring_info *rxr,
7630 bool close_path)
7631 {
7632 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7633 u32 grp_idx = rxr->bnapi->index;
7634 u32 cmpl_ring_id;
7635
7636 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7637 return;
7638
7639 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7640 hwrm_ring_free_send_msg(bp, ring,
7641 RING_FREE_REQ_RING_TYPE_RX,
7642 close_path ? cmpl_ring_id :
7643 INVALID_HW_RING_ID);
7644 ring->fw_ring_id = INVALID_HW_RING_ID;
7645 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7646 }
7647
7648 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7649 struct bnxt_rx_ring_info *rxr,
7650 bool close_path)
7651 {
7652 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7653 u32 grp_idx = rxr->bnapi->index;
7654 u32 type, cmpl_ring_id;
7655
7656 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7657 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7658 else
7659 type = RING_FREE_REQ_RING_TYPE_RX;
7660
7661 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7662 return;
7663
7664 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7665 hwrm_ring_free_send_msg(bp, ring, type,
7666 close_path ? cmpl_ring_id :
7667 INVALID_HW_RING_ID);
7668 ring->fw_ring_id = INVALID_HW_RING_ID;
7669 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7670 }
7671
7672 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7673 struct bnxt_cp_ring_info *cpr)
7674 {
7675 struct bnxt_ring_struct *ring;
7676
7677 ring = &cpr->cp_ring_struct;
7678 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7679 return;
7680
7681 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7682 INVALID_HW_RING_ID);
7683 ring->fw_ring_id = INVALID_HW_RING_ID;
7684 }
7685
7686 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7687 {
7688 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7689 int i, size = ring->ring_mem.page_size;
7690
7691 cpr->cp_raw_cons = 0;
7692 cpr->toggle = 0;
7693
7694 for (i = 0; i < bp->cp_nr_pages; i++)
7695 if (cpr->cp_desc_ring[i])
7696 memset(cpr->cp_desc_ring[i], 0, size);
7697 }
7698
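/* Free all firmware rings: TX first, then RX and aggregation rings,
 * and finally the completion/NQ rings after interrupts are disabled.
 */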
7699 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7700 {
7701 u32 type;
7702 int i;
7703
7704 if (!bp->bnapi)
7705 return;
7706
7707 for (i = 0; i < bp->tx_nr_rings; i++)
7708 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7709
7710 bnxt_cancel_dim(bp);
7711 for (i = 0; i < bp->rx_nr_rings; i++) {
7712 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7713 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7714 }
7715
7716 /* The completion rings are about to be freed. After that the
7717 * IRQ doorbell will not work anymore. So we need to disable
7718 * IRQ here.
7719 */
7720 bnxt_disable_int_sync(bp);
7721
7722 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7723 type = RING_FREE_REQ_RING_TYPE_NQ;
7724 else
7725 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7726 for (i = 0; i < bp->cp_nr_rings; i++) {
7727 struct bnxt_napi *bnapi = bp->bnapi[i];
7728 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7729 struct bnxt_ring_struct *ring;
7730 int j;
7731
7732 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7733 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7734
7735 ring = &cpr->cp_ring_struct;
7736 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7737 hwrm_ring_free_send_msg(bp, ring, type,
7738 INVALID_HW_RING_ID);
7739 ring->fw_ring_id = INVALID_HW_RING_ID;
7740 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7741 }
7742 }
7743 }
7744
7745 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7746 bool shared);
7747 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7748 bool shared);
7749
7750 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7751 {
7752 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7753 struct hwrm_func_qcfg_output *resp;
7754 struct hwrm_func_qcfg_input *req;
7755 int rc;
7756
7757 if (bp->hwrm_spec_code < 0x10601)
7758 return 0;
7759
7760 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7761 if (rc)
7762 return rc;
7763
7764 req->fid = cpu_to_le16(0xffff);
7765 resp = hwrm_req_hold(bp, req);
7766 rc = hwrm_req_send(bp, req);
7767 if (rc) {
7768 hwrm_req_drop(bp, req);
7769 return rc;
7770 }
7771
7772 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7773 if (BNXT_NEW_RM(bp)) {
7774 u16 cp, stats;
7775
7776 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7777 hw_resc->resv_hw_ring_grps =
7778 le32_to_cpu(resp->alloc_hw_ring_grps);
7779 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7780 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7781 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7782 stats = le16_to_cpu(resp->alloc_stat_ctx);
7783 hw_resc->resv_irqs = cp;
7784 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7785 int rx = hw_resc->resv_rx_rings;
7786 int tx = hw_resc->resv_tx_rings;
7787
7788 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7789 rx >>= 1;
7790 if (cp < (rx + tx)) {
7791 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7792 if (rc)
7793 goto get_rings_exit;
7794 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7795 rx <<= 1;
7796 hw_resc->resv_rx_rings = rx;
7797 hw_resc->resv_tx_rings = tx;
7798 }
7799 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7800 hw_resc->resv_hw_ring_grps = rx;
7801 }
7802 hw_resc->resv_cp_rings = cp;
7803 hw_resc->resv_stat_ctxs = stats;
7804 }
7805 get_rings_exit:
7806 hwrm_req_drop(bp, req);
7807 return rc;
7808 }
7809
7810 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7811 {
7812 struct hwrm_func_qcfg_output *resp;
7813 struct hwrm_func_qcfg_input *req;
7814 int rc;
7815
7816 if (bp->hwrm_spec_code < 0x10601)
7817 return 0;
7818
7819 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7820 if (rc)
7821 return rc;
7822
7823 req->fid = cpu_to_le16(fid);
7824 resp = hwrm_req_hold(bp, req);
7825 rc = hwrm_req_send(bp, req);
7826 if (!rc)
7827 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7828
7829 hwrm_req_drop(bp, req);
7830 return rc;
7831 }
7832
7833 static bool bnxt_rfs_supported(struct bnxt *bp);
7834
7835 static struct hwrm_func_cfg_input *
7836 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7837 {
7838 struct hwrm_func_cfg_input *req;
7839 u32 enables = 0;
7840
7841 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7842 return NULL;
7843
7844 req->fid = cpu_to_le16(0xffff);
7845 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7846 req->num_tx_rings = cpu_to_le16(hwr->tx);
7847 if (BNXT_NEW_RM(bp)) {
7848 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7849 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7850 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7851 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7852 enables |= hwr->cp_p5 ?
7853 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7854 } else {
7855 enables |= hwr->cp ?
7856 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7857 enables |= hwr->grp ?
7858 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7859 }
7860 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7861 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7862 0;
7863 req->num_rx_rings = cpu_to_le16(hwr->rx);
7864 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7865 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7866 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7867 req->num_msix = cpu_to_le16(hwr->cp);
7868 } else {
7869 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7870 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7871 }
7872 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7873 req->num_vnics = cpu_to_le16(hwr->vnic);
7874 }
7875 req->enables = cpu_to_le32(enables);
7876 return req;
7877 }
7878
7879 static struct hwrm_func_vf_cfg_input *
7880 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7881 {
7882 struct hwrm_func_vf_cfg_input *req;
7883 u32 enables = 0;
7884
7885 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7886 return NULL;
7887
7888 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7889 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7890 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7891 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7892 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7893 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7894 enables |= hwr->cp_p5 ?
7895 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7896 } else {
7897 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7898 enables |= hwr->grp ?
7899 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7900 }
7901 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7902 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7903
7904 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7905 req->num_tx_rings = cpu_to_le16(hwr->tx);
7906 req->num_rx_rings = cpu_to_le16(hwr->rx);
7907 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7908 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7909 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7910 } else {
7911 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7912 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7913 }
7914 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7915 req->num_vnics = cpu_to_le16(hwr->vnic);
7916
7917 req->enables = cpu_to_le32(enables);
7918 return req;
7919 }
7920
7921 static int
7922 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7923 {
7924 struct hwrm_func_cfg_input *req;
7925 int rc;
7926
7927 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7928 if (!req)
7929 return -ENOMEM;
7930
7931 if (!req->enables) {
7932 hwrm_req_drop(bp, req);
7933 return 0;
7934 }
7935
7936 rc = hwrm_req_send(bp, req);
7937 if (rc)
7938 return rc;
7939
7940 if (bp->hwrm_spec_code < 0x10601)
7941 bp->hw_resc.resv_tx_rings = hwr->tx;
7942
7943 return bnxt_hwrm_get_rings(bp);
7944 }
7945
7946 static int
7947 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7948 {
7949 struct hwrm_func_vf_cfg_input *req;
7950 int rc;
7951
7952 if (!BNXT_NEW_RM(bp)) {
7953 bp->hw_resc.resv_tx_rings = hwr->tx;
7954 return 0;
7955 }
7956
7957 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7958 if (!req)
7959 return -ENOMEM;
7960
7961 rc = hwrm_req_send(bp, req);
7962 if (rc)
7963 return rc;
7964
7965 return bnxt_hwrm_get_rings(bp);
7966 }
7967
7968 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7969 {
7970 if (BNXT_PF(bp))
7971 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7972 else
7973 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7974 }
7975
7976 int bnxt_nq_rings_in_use(struct bnxt *bp)
7977 {
7978 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7979 }
7980
7981 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7982 {
7983 int cp;
7984
7985 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7986 return bnxt_nq_rings_in_use(bp);
7987
7988 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7989 return cp;
7990 }
7991
7992 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7993 {
7994 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7995 }
7996
7997 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7998 {
7999 if (!hwr->grp)
8000 return 0;
8001 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8002 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
8003
8004 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
8005 rss_ctx *= hwr->vnic;
8006 return rss_ctx;
8007 }
8008 if (BNXT_VF(bp))
8009 return BNXT_VF_MAX_RSS_CTX;
8010 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
8011 return hwr->grp + 1;
8012 return 1;
8013 }
8014
8015 /* Check if a default RSS map needs to be set up. This function is only
8016 * used on older firmware that does not require reserving RX rings.
8017 */
8018 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
8019 {
8020 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8021
8022 /* The RSS map is valid for RX rings set to resv_rx_rings */
8023 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
8024 hw_resc->resv_rx_rings = bp->rx_nr_rings;
8025 if (!netif_is_rxfh_configured(bp->dev))
8026 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8027 }
8028 }
8029
8030 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
8031 {
8032 if (bp->flags & BNXT_FLAG_RFS) {
8033 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
8034 return 2 + bp->num_rss_ctx;
8035 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8036 return rx_rings + 1;
8037 }
8038 return 1;
8039 }
8040
8041 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8042 {
8043 hwr->cp = bnxt_nq_rings_in_use(bp);
8044 hwr->cp_p5 = 0;
8045 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8046 hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
8047 hwr->tx = bp->tx_nr_rings;
8048 hwr->rx = bp->rx_nr_rings;
8049 hwr->grp = hwr->rx;
8050 hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
8051 hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
8052 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8053 hwr->rx <<= 1;
8054 hwr->stat = bnxt_get_func_stat_ctxs(bp);
8055 }
8056
8057 static bool bnxt_need_reserve_rings(struct bnxt *bp)
8058 {
8059 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8060 struct bnxt_hw_rings hwr;
8061
8062 bnxt_get_total_resources(bp, &hwr);
8063
8064 /* Old firmware does not need RX ring reservations but we still
8065 * need to set up a default RSS map when needed. With new firmware
8066 * we go through RX ring reservations first and then set up the
8067 * RSS map for the successfully reserved RX rings when needed.
8068 */
8069 if (!BNXT_NEW_RM(bp))
8070 bnxt_check_rss_tbl_no_rmgr(bp);
8071
8072 if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
8073 return true;
8074
8075 if (!BNXT_NEW_RM(bp))
8076 return false;
8077
8078 if (hw_resc->resv_rx_rings != hwr.rx ||
8079 hw_resc->resv_vnics != hwr.vnic ||
8080 hw_resc->resv_stat_ctxs != hwr.stat ||
8081 hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
8082 (hw_resc->resv_hw_ring_grps != hwr.grp &&
8083 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
8084 return true;
8085 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8086 if (hw_resc->resv_cp_rings != hwr.cp_p5)
8087 return true;
8088 } else if (hw_resc->resv_cp_rings != hwr.cp) {
8089 return true;
8090 }
8091 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
8092 hw_resc->resv_irqs != hwr.cp)
8093 return true;
8094 return false;
8095 }
8096
8097 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8098 {
8099 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8100
8101 hwr->tx = hw_resc->resv_tx_rings;
8102 if (BNXT_NEW_RM(bp)) {
8103 hwr->rx = hw_resc->resv_rx_rings;
8104 hwr->cp = hw_resc->resv_irqs;
8105 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8106 hwr->cp_p5 = hw_resc->resv_cp_rings;
8107 hwr->grp = hw_resc->resv_hw_ring_grps;
8108 hwr->vnic = hw_resc->resv_vnics;
8109 hwr->stat = hw_resc->resv_stat_ctxs;
8110 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8111 }
8112 }
8113
8114 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8115 {
8116 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8117 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8118 }
8119
8120 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8121
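/* Reserve rings, VNICs, stat contexts and MSI-X with firmware and trim
 * the driver's ring counts down to what was actually granted.
 */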
8122 static int __bnxt_reserve_rings(struct bnxt *bp)
8123 {
8124 struct bnxt_en_dev *edev = bp->edev[BNXT_AUXDEV_RDMA];
8125 struct bnxt_hw_rings hwr = {0};
8126 int rx_rings, old_rx_rings, rc;
8127 int cp = bp->cp_nr_rings;
8128 int ulp_msix = 0;
8129 bool sh = false;
8130 int tx_cp;
8131
8132 if (!bnxt_need_reserve_rings(bp))
8133 return 0;
8134
8135 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(edev)) {
8136 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8137 if (!ulp_msix)
8138 bnxt_set_ulp_stat_ctxs(bp, 0);
8139 else
8140 bnxt_set_dflt_ulp_stat_ctxs(bp);
8141
8142 if (ulp_msix > bp->ulp_num_msix_want)
8143 ulp_msix = bp->ulp_num_msix_want;
8144 hwr.cp = cp + ulp_msix;
8145 } else {
8146 hwr.cp = bnxt_nq_rings_in_use(bp);
8147 }
8148
8149 hwr.tx = bp->tx_nr_rings;
8150 hwr.rx = bp->rx_nr_rings;
8151 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8152 sh = true;
8153 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8154 hwr.cp_p5 = hwr.rx + hwr.tx;
8155
8156 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8157
8158 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8159 hwr.rx <<= 1;
8160 hwr.grp = bp->rx_nr_rings;
8161 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8162 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8163 old_rx_rings = bp->hw_resc.resv_rx_rings;
8164
8165 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8166 if (rc)
8167 return rc;
8168
8169 bnxt_copy_reserved_rings(bp, &hwr);
8170
8171 rx_rings = hwr.rx;
8172 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8173 if (hwr.rx >= 2) {
8174 rx_rings = hwr.rx >> 1;
8175 } else {
8176 if (netif_running(bp->dev))
8177 return -ENOMEM;
8178
8179 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8180 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8181 bp->dev->hw_features &= ~NETIF_F_LRO;
8182 bp->dev->features &= ~NETIF_F_LRO;
8183 bnxt_set_ring_params(bp);
8184 }
8185 }
8186 rx_rings = min_t(int, rx_rings, hwr.grp);
8187 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8188 if (bnxt_ulp_registered(edev) && hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8189 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8190 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8191 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8192 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8193 hwr.rx = rx_rings << 1;
8194 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8195 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8196 if (hwr.tx != bp->tx_nr_rings) {
8197 netdev_warn(bp->dev,
8198 "Able to reserve only %d out of %d requested TX rings\n",
8199 hwr.tx, bp->tx_nr_rings);
8200 }
8201 bp->tx_nr_rings = hwr.tx;
8202
8203 /* If we cannot reserve all the RX rings, reset the RSS map only
8204 * if absolutely necessary
8205 */
8206 if (rx_rings != bp->rx_nr_rings) {
8207 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8208 rx_rings, bp->rx_nr_rings);
8209 if (netif_is_rxfh_configured(bp->dev) &&
8210 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8211 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8212 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8213 ethtool_rxfh_indir_lost(bp->dev);
8214 }
8215 }
8216 bp->rx_nr_rings = rx_rings;
8217 bp->cp_nr_rings = hwr.cp;
8218
8219 /* Fall back if we cannot reserve enough HW RSS contexts */
8220 if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8221 hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8222 bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8223
8224 if (!bnxt_rings_ok(bp, &hwr))
8225 return -ENOMEM;
8226
8227 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8228 !netif_is_rxfh_configured(bp->dev))
8229 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8230
8231 if (!bnxt_ulp_registered(edev) && BNXT_NEW_RM(bp)) {
8232 int resv_msix, resv_ctx, ulp_ctxs;
8233 struct bnxt_hw_resc *hw_resc;
8234
8235 hw_resc = &bp->hw_resc;
8236 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8237 ulp_msix = min_t(int, resv_msix, ulp_msix);
8238 bnxt_set_ulp_msix_num(bp, ulp_msix);
8239 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8240 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8241 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8242 }
8243
8244 return rc;
8245 }
8246
8247 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8248 {
8249 struct hwrm_func_vf_cfg_input *req;
8250 u32 flags;
8251
8252 if (!BNXT_NEW_RM(bp))
8253 return 0;
8254
8255 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8256 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8257 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8258 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8259 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8260 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8261 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8262 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8263 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8264
8265 req->flags = cpu_to_le32(flags);
8266 return hwrm_req_send_silent(bp, req);
8267 }
8268
8269 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8270 {
8271 struct hwrm_func_cfg_input *req;
8272 u32 flags;
8273
8274 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8275 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8276 if (BNXT_NEW_RM(bp)) {
8277 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8278 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8279 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8280 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8281 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8282 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8283 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8284 else
8285 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8286 }
8287
8288 req->flags = cpu_to_le32(flags);
8289 return hwrm_req_send_silent(bp, req);
8290 }
8291
8292 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8293 {
8294 if (bp->hwrm_spec_code < 0x10801)
8295 return 0;
8296
8297 if (BNXT_PF(bp))
8298 return bnxt_hwrm_check_pf_rings(bp, hwr);
8299
8300 return bnxt_hwrm_check_vf_rings(bp, hwr);
8301 }
8302
8303 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8304 {
8305 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8306 struct hwrm_ring_aggint_qcaps_output *resp;
8307 struct hwrm_ring_aggint_qcaps_input *req;
8308 int rc;
8309
8310 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8311 coal_cap->num_cmpl_dma_aggr_max = 63;
8312 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8313 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8314 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8315 coal_cap->int_lat_tmr_min_max = 65535;
8316 coal_cap->int_lat_tmr_max_max = 65535;
8317 coal_cap->num_cmpl_aggr_int_max = 65535;
8318 coal_cap->timer_units = 80;
8319
8320 if (bp->hwrm_spec_code < 0x10902)
8321 return;
8322
8323 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8324 return;
8325
8326 resp = hwrm_req_hold(bp, req);
8327 rc = hwrm_req_send_silent(bp, req);
8328 if (!rc) {
8329 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8330 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8331 coal_cap->num_cmpl_dma_aggr_max =
8332 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8333 coal_cap->num_cmpl_dma_aggr_during_int_max =
8334 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8335 coal_cap->cmpl_aggr_dma_tmr_max =
8336 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8337 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8338 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8339 coal_cap->int_lat_tmr_min_max =
8340 le16_to_cpu(resp->int_lat_tmr_min_max);
8341 coal_cap->int_lat_tmr_max_max =
8342 le16_to_cpu(resp->int_lat_tmr_max_max);
8343 coal_cap->num_cmpl_aggr_int_max =
8344 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8345 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8346 }
8347 hwrm_req_drop(bp, req);
8348 }
8349
8350 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8351 {
8352 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8353
8354 return usec * 1000 / coal_cap->timer_units;
8355 }
8356
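/* Convert the driver's coalescing settings (ticks and buffers) into
 * HWRM aggint parameters, clamping each value to the queried limits.
 */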
8357 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8358 struct bnxt_coal *hw_coal,
8359 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8360 {
8361 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8362 u16 val, tmr, max, flags = hw_coal->flags;
8363 u32 cmpl_params = coal_cap->cmpl_params;
8364
8365 max = hw_coal->bufs_per_record * 128;
8366 if (hw_coal->budget)
8367 max = hw_coal->bufs_per_record * hw_coal->budget;
8368 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8369
8370 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8371 req->num_cmpl_aggr_int = cpu_to_le16(val);
8372
8373 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8374 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8375
8376 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8377 coal_cap->num_cmpl_dma_aggr_during_int_max);
8378 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8379
8380 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8381 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8382 req->int_lat_tmr_max = cpu_to_le16(tmr);
8383
8384 /* min timer set to 1/2 of interrupt timer */
8385 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8386 val = tmr / 2;
8387 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8388 req->int_lat_tmr_min = cpu_to_le16(val);
8389 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8390 }
8391
8392 /* buf timer set to 1/4 of interrupt timer */
8393 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8394 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8395
8396 if (cmpl_params &
8397 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8398 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8399 val = clamp_t(u16, tmr, 1,
8400 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8401 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8402 req->enables |=
8403 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8404 }
8405
8406 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8407 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8408 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8409 req->flags = cpu_to_le16(flags);
8410 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8411 }
8412
8413 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8414 struct bnxt_coal *hw_coal)
8415 {
8416 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8417 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8418 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8419 u32 nq_params = coal_cap->nq_params;
8420 u16 tmr;
8421 int rc;
8422
8423 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8424 return 0;
8425
8426 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8427 if (rc)
8428 return rc;
8429
8430 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8431 req->flags =
8432 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8433
8434 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8435 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8436 req->int_lat_tmr_min = cpu_to_le16(tmr);
8437 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8438 return hwrm_req_send(bp, req);
8439 }
8440
8441 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8442 {
8443 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8444 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8445 struct bnxt_coal coal;
8446 int rc;
8447
8448 /* Tick values in microseconds.
8449 * 1 coal_buf x bufs_per_record = 1 completion record.
8450 */
8451 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8452
8453 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8454 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8455
8456 if (!bnapi->rx_ring)
8457 return -ENODEV;
8458
8459 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8460 if (rc)
8461 return rc;
8462
8463 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8464
8465 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8466
8467 return hwrm_req_send(bp, req_rx);
8468 }
8469
8470 static int
8471 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8472 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8473 {
8474 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8475
8476 req->ring_id = cpu_to_le16(ring_id);
8477 return hwrm_req_send(bp, req);
8478 }
8479
8480 static int
8481 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8482 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8483 {
8484 struct bnxt_tx_ring_info *txr;
8485 int i, rc;
8486
8487 bnxt_for_each_napi_tx(i, bnapi, txr) {
8488 u16 ring_id;
8489
8490 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8491 req->ring_id = cpu_to_le16(ring_id);
8492 rc = hwrm_req_send(bp, req);
8493 if (rc)
8494 return rc;
8495 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8496 return 0;
8497 }
8498 return 0;
8499 }
8500
8501 int bnxt_hwrm_set_coal(struct bnxt *bp)
8502 {
8503 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8504 int i, rc;
8505
8506 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8507 if (rc)
8508 return rc;
8509
8510 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8511 if (rc) {
8512 hwrm_req_drop(bp, req_rx);
8513 return rc;
8514 }
8515
8516 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8517 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8518
8519 hwrm_req_hold(bp, req_rx);
8520 hwrm_req_hold(bp, req_tx);
8521 for (i = 0; i < bp->cp_nr_rings; i++) {
8522 struct bnxt_napi *bnapi = bp->bnapi[i];
8523 struct bnxt_coal *hw_coal;
8524
8525 if (!bnapi->rx_ring)
8526 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8527 else
8528 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8529 if (rc)
8530 break;
8531
8532 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8533 continue;
8534
8535 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8536 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8537 if (rc)
8538 break;
8539 }
8540 if (bnapi->rx_ring)
8541 hw_coal = &bp->rx_coal;
8542 else
8543 hw_coal = &bp->tx_coal;
8544 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8545 }
8546 hwrm_req_drop(bp, req_rx);
8547 hwrm_req_drop(bp, req_tx);
8548 return rc;
8549 }
8550
8551 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8552 {
8553 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8554 struct hwrm_stat_ctx_free_input *req;
8555 int i;
8556
8557 if (!bp->bnapi)
8558 return;
8559
8560 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8561 return;
8562
8563 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8564 return;
8565 if (BNXT_FW_MAJ(bp) <= 20) {
8566 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8567 hwrm_req_drop(bp, req);
8568 return;
8569 }
8570 hwrm_req_hold(bp, req0);
8571 }
8572 hwrm_req_hold(bp, req);
8573 for (i = 0; i < bp->cp_nr_rings; i++) {
8574 struct bnxt_napi *bnapi = bp->bnapi[i];
8575 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8576
8577 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8578 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8579 if (req0) {
8580 req0->stat_ctx_id = req->stat_ctx_id;
8581 hwrm_req_send(bp, req0);
8582 }
8583 hwrm_req_send(bp, req);
8584
8585 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8586 }
8587 }
8588 hwrm_req_drop(bp, req);
8589 if (req0)
8590 hwrm_req_drop(bp, req0);
8591 }
8592
8593 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8594 {
8595 struct hwrm_stat_ctx_alloc_output *resp;
8596 struct hwrm_stat_ctx_alloc_input *req;
8597 int rc, i;
8598
8599 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8600 return 0;
8601
8602 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8603 if (rc)
8604 return rc;
8605
8606 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8607 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8608
8609 resp = hwrm_req_hold(bp, req);
8610 for (i = 0; i < bp->cp_nr_rings; i++) {
8611 struct bnxt_napi *bnapi = bp->bnapi[i];
8612 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8613
8614 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8615
8616 rc = hwrm_req_send(bp, req);
8617 if (rc)
8618 break;
8619
8620 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8621
8622 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8623 }
8624 hwrm_req_drop(bp, req);
8625 return rc;
8626 }
8627
8628 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8629 {
8630 struct hwrm_func_qcfg_output *resp;
8631 struct hwrm_func_qcfg_input *req;
8632 u16 flags;
8633 int rc;
8634
8635 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8636 if (rc)
8637 return rc;
8638
8639 req->fid = cpu_to_le16(0xffff);
8640 resp = hwrm_req_hold(bp, req);
8641 rc = hwrm_req_send(bp, req);
8642 if (rc)
8643 goto func_qcfg_exit;
8644
8645 flags = le16_to_cpu(resp->flags);
8646 #ifdef CONFIG_BNXT_SRIOV
8647 if (BNXT_VF(bp)) {
8648 struct bnxt_vf_info *vf = &bp->vf;
8649
8650 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8651 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8652 vf->flags |= BNXT_VF_TRUST;
8653 else
8654 vf->flags &= ~BNXT_VF_TRUST;
8655 } else {
8656 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8657 }
8658 #endif
8659 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8660 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8661 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8662 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8663 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8664 }
8665 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8666 bp->flags |= BNXT_FLAG_MULTI_HOST;
8667
8668 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8669 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8670
8671 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8672 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8673 if (resp->roce_bidi_opt_mode &
8674 FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8675 bp->cos0_cos1_shared = 1;
8676 else
8677 bp->cos0_cos1_shared = 0;
8678
8679 switch (resp->port_partition_type) {
8680 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8681 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8682 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8683 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8684 bp->port_partition_type = resp->port_partition_type;
8685 break;
8686 }
8687 if (bp->hwrm_spec_code < 0x10707 ||
8688 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8689 bp->br_mode = BRIDGE_MODE_VEB;
8690 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8691 bp->br_mode = BRIDGE_MODE_VEPA;
8692 else
8693 bp->br_mode = BRIDGE_MODE_UNDEF;
8694
8695 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8696 if (!bp->max_mtu)
8697 bp->max_mtu = BNXT_MAX_MTU;
8698
8699 if (bp->db_size)
8700 goto func_qcfg_exit;
8701
8702 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8703 if (BNXT_CHIP_P5(bp)) {
8704 if (BNXT_PF(bp))
8705 bp->db_offset = DB_PF_OFFSET_P5;
8706 else
8707 bp->db_offset = DB_VF_OFFSET_P5;
8708 }
8709 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8710 1024);
8711 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8712 bp->db_size <= bp->db_offset)
8713 bp->db_size = pci_resource_len(bp->pdev, 2);
8714
8715 func_qcfg_exit:
8716 hwrm_req_drop(bp, req);
8717 return rc;
8718 }
8719
8720 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8721 u8 init_val, u8 init_offset,
8722 bool init_mask_set)
8723 {
8724 ctxm->init_value = init_val;
8725 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8726 if (init_mask_set)
8727 ctxm->init_offset = init_offset * 4;
8728 else
8729 ctxm->init_value = 0;
8730 }
8731
8732 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8733 {
8734 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8735 u16 type;
8736
8737 for (type = 0; type < ctx_max; type++) {
8738 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8739 int n = 1;
8740
8741 if (!ctxm->max_entries || ctxm->pg_info)
8742 continue;
8743
8744 if (ctxm->instance_bmap)
8745 n = hweight32(ctxm->instance_bmap);
8746 ctxm->pg_info = kzalloc_objs(*ctxm->pg_info, n);
8747 if (!ctxm->pg_info)
8748 return -ENOMEM;
8749 }
8750 return 0;
8751 }
8752
8753 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8754 struct bnxt_ctx_mem_type *ctxm, bool force);
8755
8756 #define BNXT_CTX_INIT_VALID(flags) \
8757 (!!((flags) & \
8758 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8759
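/* Query each V2 backing store context type from firmware and record its
 * entry size, entry limits, split counts and initializer.
 */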
8760 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8761 {
8762 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8763 struct hwrm_func_backing_store_qcaps_v2_input *req;
8764 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8765 u16 type, next_type = 0;
8766 int rc;
8767
8768 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8769 if (rc)
8770 return rc;
8771
8772 if (!ctx) {
8773 ctx = kzalloc_obj(*ctx);
8774 if (!ctx)
8775 return -ENOMEM;
8776 bp->ctx = ctx;
8777 }
8778
8779 resp = hwrm_req_hold(bp, req);
8780
8781 for (type = 0; type < BNXT_CTX_V2_MAX; type = next_type) {
8782 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8783 u8 init_val, init_off, i;
8784 u32 max_entries;
8785 u16 entry_size;
8786 __le32 *p;
8787 u32 flags;
8788
8789 req->type = cpu_to_le16(type);
8790 rc = hwrm_req_send(bp, req);
8791 if (rc)
8792 goto ctx_done;
8793 flags = le32_to_cpu(resp->flags);
8794 next_type = le16_to_cpu(resp->next_valid_type);
8795 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8796 bnxt_free_one_ctx_mem(bp, ctxm, true);
8797 continue;
8798 }
8799 entry_size = le16_to_cpu(resp->entry_size);
8800 max_entries = le32_to_cpu(resp->max_num_entries);
8801 if (ctxm->mem_valid) {
8802 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8803 ctxm->entry_size != entry_size ||
8804 ctxm->max_entries != max_entries)
8805 bnxt_free_one_ctx_mem(bp, ctxm, true);
8806 else
8807 continue;
8808 }
8809 ctxm->type = type;
8810 ctxm->entry_size = entry_size;
8811 ctxm->flags = flags;
8812 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8813 ctxm->entry_multiple = resp->entry_multiple;
8814 ctxm->max_entries = max_entries;
8815 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8816 init_val = resp->ctx_init_value;
8817 init_off = resp->ctx_init_offset;
8818 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8819 BNXT_CTX_INIT_VALID(flags));
8820 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8821 BNXT_MAX_SPLIT_ENTRY);
8822 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8823 i++, p++)
8824 ctxm->split[i] = le32_to_cpu(*p);
8825 }
8826 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8827
8828 ctx_done:
8829 hwrm_req_drop(bp, req);
8830 return rc;
8831 }
8832
8833 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8834 {
8835 struct hwrm_func_backing_store_qcaps_output *resp;
8836 struct hwrm_func_backing_store_qcaps_input *req;
8837 int rc;
8838
8839 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8840 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8841 return 0;
8842
8843 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8844 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8845
8846 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8847 if (rc)
8848 return rc;
8849
8850 resp = hwrm_req_hold(bp, req);
8851 rc = hwrm_req_send_silent(bp, req);
8852 if (!rc) {
8853 struct bnxt_ctx_mem_type *ctxm;
8854 struct bnxt_ctx_mem_info *ctx;
8855 u8 init_val, init_idx = 0;
8856 u16 init_mask;
8857
8858 ctx = bp->ctx;
8859 if (!ctx) {
8860 ctx = kzalloc_obj(*ctx);
8861 if (!ctx) {
8862 rc = -ENOMEM;
8863 goto ctx_err;
8864 }
8865 bp->ctx = ctx;
8866 }
8867 init_val = resp->ctx_kind_initializer;
8868 init_mask = le16_to_cpu(resp->ctx_init_mask);
8869
8870 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8871 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8872 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8873 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8874 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8875 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8876 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8877 (init_mask & (1 << init_idx++)) != 0);
8878
8879 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8880 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8881 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8882 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8883 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8884 (init_mask & (1 << init_idx++)) != 0);
8885
8886 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8887 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8888 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8889 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8890 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8891 (init_mask & (1 << init_idx++)) != 0);
8892
8893 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8894 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8895 ctxm->max_entries = ctxm->vnic_entries +
8896 le16_to_cpu(resp->vnic_max_ring_table_entries);
8897 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8898 bnxt_init_ctx_initializer(ctxm, init_val,
8899 resp->vnic_init_offset,
8900 (init_mask & (1 << init_idx++)) != 0);
8901
8902 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8903 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8904 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8905 bnxt_init_ctx_initializer(ctxm, init_val,
8906 resp->stat_init_offset,
8907 (init_mask & (1 << init_idx++)) != 0);
8908
8909 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8910 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8911 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8912 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8913 ctxm->entry_multiple = resp->tqm_entries_multiple;
8914 if (!ctxm->entry_multiple)
8915 ctxm->entry_multiple = 1;
8916
8917 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8918
8919 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8920 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8921 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8922 ctxm->mrav_num_entries_units =
8923 le16_to_cpu(resp->mrav_num_entries_units);
8924 bnxt_init_ctx_initializer(ctxm, init_val,
8925 resp->mrav_init_offset,
8926 (init_mask & (1 << init_idx++)) != 0);
8927
8928 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8929 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8930 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8931
8932 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8933 if (!ctx->tqm_fp_rings_count)
8934 ctx->tqm_fp_rings_count = bp->max_q;
8935 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8936 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8937
8938 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8939 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8940 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8941
8942 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8943 } else {
8944 rc = 0;
8945 }
8946 ctx_err:
8947 hwrm_req_drop(bp, req);
8948 return rc;
8949 }
8950
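/* Encode the page size, indirection level, and DMA address of a
 * backing store ring into the request fields.  The low bits of
 * *pg_attr carry the PBL level (0 = flat, 1 = one level of page
 * tables, 2 = two levels); *pg_dir gets either the page table DMA
 * address or, for a flat ring, the address of the single data page.
 */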
8951 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8952 __le64 *pg_dir)
8953 {
8954 if (!rmem->nr_pages)
8955 return;
8956
8957 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8958 if (rmem->depth >= 1) {
8959 if (rmem->depth == 2)
8960 *pg_attr |= 2;
8961 else
8962 *pg_attr |= 1;
8963 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8964 } else {
8965 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8966 }
8967 }
8968
8969 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8970 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8971 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8972 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8973 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8974 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8975
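/* Configure the legacy (non-V2) backing store in a single request.
 * If the firmware cannot accept the full extended request, fall back
 * to the legacy request length so that only the fields older firmware
 * understands are sent.
 */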
8976 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8977 {
8978 struct hwrm_func_backing_store_cfg_input *req;
8979 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8980 struct bnxt_ctx_pg_info *ctx_pg;
8981 struct bnxt_ctx_mem_type *ctxm;
8982 void **__req = (void **)&req;
8983 u32 req_len = sizeof(*req);
8984 __le32 *num_entries;
8985 __le64 *pg_dir;
8986 u32 flags = 0;
8987 u8 *pg_attr;
8988 u32 ena;
8989 int rc;
8990 int i;
8991
8992 if (!ctx)
8993 return 0;
8994
8995 if (req_len > bp->hwrm_max_ext_req_len)
8996 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8997 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8998 if (rc)
8999 return rc;
9000
9001 req->enables = cpu_to_le32(enables);
9002 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
9003 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9004 ctx_pg = ctxm->pg_info;
9005 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
9006 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
9007 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
9008 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
9009 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9010 &req->qpc_pg_size_qpc_lvl,
9011 &req->qpc_page_dir);
9012
9013 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
9014 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
9015 }
9016 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
9017 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9018 ctx_pg = ctxm->pg_info;
9019 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
9020 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
9021 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
9022 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9023 &req->srq_pg_size_srq_lvl,
9024 &req->srq_page_dir);
9025 }
9026 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
9027 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9028 ctx_pg = ctxm->pg_info;
9029 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
9030 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
9031 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
9032 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9033 &req->cq_pg_size_cq_lvl,
9034 &req->cq_page_dir);
9035 }
9036 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
9037 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9038 ctx_pg = ctxm->pg_info;
9039 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
9040 req->vnic_num_ring_table_entries =
9041 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
9042 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
9043 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9044 &req->vnic_pg_size_vnic_lvl,
9045 &req->vnic_page_dir);
9046 }
9047 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
9048 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9049 ctx_pg = ctxm->pg_info;
9050 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
9051 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
9052 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9053 &req->stat_pg_size_stat_lvl,
9054 &req->stat_page_dir);
9055 }
9056 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
9057 u32 units;
9058
9059 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9060 ctx_pg = ctxm->pg_info;
9061 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
9062 units = ctxm->mrav_num_entries_units;
9063 if (units) {
9064 u32 num_mr, num_ah = ctxm->mrav_av_entries;
9065 u32 entries;
9066
9067 num_mr = ctx_pg->entries - num_ah;
9068 entries = ((num_mr / units) << 16) | (num_ah / units);
9069 req->mrav_num_entries = cpu_to_le32(entries);
9070 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
9071 }
9072 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
9073 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9074 &req->mrav_pg_size_mrav_lvl,
9075 &req->mrav_page_dir);
9076 }
9077 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
9078 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9079 ctx_pg = ctxm->pg_info;
9080 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
9081 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
9082 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9083 &req->tim_pg_size_tim_lvl,
9084 &req->tim_page_dir);
9085 }
9086 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9087 for (i = 0, num_entries = &req->tqm_sp_num_entries,
9088 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
9089 pg_dir = &req->tqm_sp_page_dir,
9090 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
9091 ctx_pg = ctxm->pg_info;
9092 i < BNXT_MAX_TQM_RINGS;
9093 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
9094 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
9095 if (!(enables & ena))
9096 continue;
9097
9098 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
9099 *num_entries = cpu_to_le32(ctx_pg->entries);
9100 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9101 }
9102 req->flags = cpu_to_le32(flags);
9103 return hwrm_req_send(bp, req);
9104 }
9105
9106 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9107 struct bnxt_ctx_pg_info *ctx_pg)
9108 {
9109 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9110
9111 rmem->page_size = BNXT_PAGE_SIZE;
9112 rmem->pg_arr = ctx_pg->ctx_pg_arr;
9113 rmem->dma_arr = ctx_pg->ctx_dma_arr;
9114 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9115 if (rmem->depth >= 1)
9116 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9117 return bnxt_alloc_ring(bp, rmem);
9118 }
9119
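/* Allocate the backing pages for one context memory instance.  Small
 * regions fit in a single level (up to MAX_CTX_PAGES pages); larger
 * regions, or callers requesting depth > 1, use a two-level layout in
 * which a directory of page tables is allocated first and each table
 * then maps up to MAX_CTX_PAGES data pages.
 */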
9120 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9121 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9122 u8 depth, struct bnxt_ctx_mem_type *ctxm)
9123 {
9124 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9125 int rc;
9126
9127 if (!mem_size)
9128 return -EINVAL;
9129
9130 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9131 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9132 ctx_pg->nr_pages = 0;
9133 return -EINVAL;
9134 }
9135 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9136 int nr_tbls, i;
9137
9138 rmem->depth = 2;
9139 ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9140 if (!ctx_pg->ctx_pg_tbl)
9141 return -ENOMEM;
9142 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9143 rmem->nr_pages = nr_tbls;
9144 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9145 if (rc)
9146 return rc;
9147 for (i = 0; i < nr_tbls; i++) {
9148 struct bnxt_ctx_pg_info *pg_tbl;
9149
9150 pg_tbl = kzalloc_obj(*pg_tbl);
9151 if (!pg_tbl)
9152 return -ENOMEM;
9153 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9154 rmem = &pg_tbl->ring_mem;
9155 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9156 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9157 rmem->depth = 1;
9158 rmem->nr_pages = MAX_CTX_PAGES;
9159 rmem->ctx_mem = ctxm;
9160 if (i == (nr_tbls - 1)) {
9161 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9162
9163 if (rem)
9164 rmem->nr_pages = rem;
9165 }
9166 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9167 if (rc)
9168 break;
9169 }
9170 } else {
9171 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9172 if (rmem->nr_pages > 1 || depth)
9173 rmem->depth = 1;
9174 rmem->ctx_mem = ctxm;
9175 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9176 }
9177 return rc;
9178 }
9179
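/* Copy a (possibly wrapped) range of one context page table into a
 * flat buffer.  head and tail are byte offsets into the circular
 * region; the copy proceeds one ring segment at a time and wraps back
 * to offset zero when it runs past the end of the region.
 */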
9180 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9181 struct bnxt_ctx_pg_info *ctx_pg,
9182 void *buf, size_t offset, size_t head,
9183 size_t tail)
9184 {
9185 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9186 size_t nr_pages = ctx_pg->nr_pages;
9187 int page_size = rmem->page_size;
9188 size_t len = 0, total_len = 0;
9189 u16 depth = rmem->depth;
9190
9191 tail %= nr_pages * page_size;
9192 do {
9193 if (depth > 1) {
9194 int i = head / (page_size * MAX_CTX_PAGES);
9195 struct bnxt_ctx_pg_info *pg_tbl;
9196
9197 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9198 rmem = &pg_tbl->ring_mem;
9199 }
9200 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9201 head += len;
9202 offset += len;
9203 total_len += len;
9204 if (head >= nr_pages * page_size)
9205 head = 0;
9206 } while (head != tail);
9207 return total_len;
9208 }
9209
9210 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9211 struct bnxt_ctx_pg_info *ctx_pg)
9212 {
9213 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9214
9215 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9216 ctx_pg->ctx_pg_tbl) {
9217 int i, nr_tbls = rmem->nr_pages;
9218
9219 for (i = 0; i < nr_tbls; i++) {
9220 struct bnxt_ctx_pg_info *pg_tbl;
9221 struct bnxt_ring_mem_info *rmem2;
9222
9223 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9224 if (!pg_tbl)
9225 continue;
9226 rmem2 = &pg_tbl->ring_mem;
9227 bnxt_free_ring(bp, rmem2);
9228 ctx_pg->ctx_pg_arr[i] = NULL;
9229 kfree(pg_tbl);
9230 ctx_pg->ctx_pg_tbl[i] = NULL;
9231 }
9232 kfree(ctx_pg->ctx_pg_tbl);
9233 ctx_pg->ctx_pg_tbl = NULL;
9234 }
9235 bnxt_free_ring(bp, rmem);
9236 ctx_pg->nr_pages = 0;
9237 }
9238
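/* Size and allocate the page tables for a context memory type.  The
 * requested entry count is rounded up to the firmware's entry_multiple
 * and clamped to the [min_entries, max_entries] range before being
 * converted to a byte size; one allocation is then done per instance.
 */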
9239 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9240 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9241 u8 pg_lvl)
9242 {
9243 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9244 int i, rc = 0, n = 1;
9245 u32 mem_size;
9246
9247 if (!ctxm->entry_size || !ctx_pg)
9248 return -EINVAL;
9249 if (ctxm->instance_bmap)
9250 n = hweight32(ctxm->instance_bmap);
9251 if (ctxm->entry_multiple)
9252 entries = roundup(entries, ctxm->entry_multiple);
9253 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9254 mem_size = entries * ctxm->entry_size;
9255 for (i = 0; i < n && !rc; i++) {
9256 ctx_pg[i].entries = entries;
9257 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9258 ctxm->init_value ? ctxm : NULL);
9259 }
9260 if (!rc)
9261 ctxm->mem_valid = 1;
9262 return rc;
9263 }
9264
9265 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9266 struct bnxt_ctx_mem_type *ctxm,
9267 bool last)
9268 {
9269 struct hwrm_func_backing_store_cfg_v2_input *req;
9270 u32 instance_bmap = ctxm->instance_bmap;
9271 int i, j, rc = 0, n = 1;
9272 __le32 *p;
9273
9274 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9275 return 0;
9276
9277 if (instance_bmap)
9278 n = hweight32(ctxm->instance_bmap);
9279 else
9280 instance_bmap = 1;
9281
9282 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9283 if (rc)
9284 return rc;
9285 hwrm_req_hold(bp, req);
9286 req->type = cpu_to_le16(ctxm->type);
9287 req->entry_size = cpu_to_le16(ctxm->entry_size);
9288 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9289 bnxt_bs_trace_avail(bp, ctxm->type)) {
9290 struct bnxt_bs_trace_info *bs_trace;
9291 u32 enables;
9292
9293 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9294 req->enables = cpu_to_le32(enables);
9295 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9296 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9297 }
9298 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9299 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9300 p[i] = cpu_to_le32(ctxm->split[i]);
9301 for (i = 0, j = 0; j < n && !rc; i++) {
9302 struct bnxt_ctx_pg_info *ctx_pg;
9303
9304 if (!(instance_bmap & (1 << i)))
9305 continue;
9306 req->instance = cpu_to_le16(i);
9307 ctx_pg = &ctxm->pg_info[j++];
9308 if (!ctx_pg->entries)
9309 continue;
9310 req->num_entries = cpu_to_le32(ctx_pg->entries);
9311 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9312 &req->page_size_pbl_level,
9313 &req->page_dir);
9314 if (last && j == n)
9315 req->flags =
9316 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9317 rc = hwrm_req_send(bp, req);
9318 }
9319 hwrm_req_drop(bp, req);
9320 return rc;
9321 }
9322
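/* Configure all valid context memory types via the V2 interface.
 * Exactly one type must be marked as the last one so that the final
 * request carries BS_CFG_ALL_DONE; trace contexts are used for this
 * when available, otherwise the highest valid type is chosen.
 */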
9323 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9324 {
9325 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9326 struct bnxt_ctx_mem_type *ctxm;
9327 u16 last_type = BNXT_CTX_INV;
9328 int rc = 0;
9329 u16 type;
9330
9331 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9332 ctxm = &ctx->ctx_arr[type];
9333 if (!bnxt_bs_trace_avail(bp, type))
9334 continue;
9335 if (!ctxm->mem_valid) {
9336 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9337 ctxm->max_entries, 1);
9338 if (rc) {
9339 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9340 type);
9341 continue;
9342 }
9343 bnxt_bs_trace_init(bp, ctxm);
9344 }
9345 last_type = type;
9346 }
9347
9348 if (last_type == BNXT_CTX_INV) {
9349 for (type = 0; type < BNXT_CTX_MAX; type++) {
9350 ctxm = &ctx->ctx_arr[type];
9351 if (ctxm->mem_valid)
9352 last_type = type;
9353 }
9354 if (last_type == BNXT_CTX_INV)
9355 return 0;
9356 }
9357 ctx->ctx_arr[last_type].last = 1;
9358
9359 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9360 ctxm = &ctx->ctx_arr[type];
9361
9362 if (!ctxm->mem_valid)
9363 continue;
9364 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9365 if (rc)
9366 return rc;
9367 }
9368 return 0;
9369 }
9370
9371 /**
9372 * __bnxt_copy_ctx_mem - copy host context memory
9373 * @bp: The driver context
9374 * @ctxm: The pointer to the context memory type
9375 * @buf: The destination buffer or NULL to just obtain the length
9376 * @offset: The buffer offset to copy the data to
9377 * @head: The head offset of context memory to copy from
9378 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9379 *
9380 * This function is called for debugging purposes to dump the host context
9381 * used by the chip.
9382 *
9383 * Return: Length of memory copied
9384 */
9385 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9386 struct bnxt_ctx_mem_type *ctxm, void *buf,
9387 size_t offset, size_t head, size_t tail)
9388 {
9389 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9390 size_t len = 0, total_len = 0;
9391 int i, n = 1;
9392
9393 if (!ctx_pg)
9394 return 0;
9395
9396 if (ctxm->instance_bmap)
9397 n = hweight32(ctxm->instance_bmap);
9398 for (i = 0; i < n; i++) {
9399 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9400 tail);
9401 offset += len;
9402 total_len += len;
9403 }
9404 return total_len;
9405 }
9406
9407 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9408 void *buf, size_t offset)
9409 {
9410 size_t tail = ctxm->max_entries * ctxm->entry_size;
9411
9412 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9413 }
9414
9415 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9416 struct bnxt_ctx_mem_type *ctxm, bool force)
9417 {
9418 struct bnxt_ctx_pg_info *ctx_pg;
9419 int i, n = 1;
9420
9421 ctxm->last = 0;
9422
9423 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9424 return;
9425
9426 ctx_pg = ctxm->pg_info;
9427 if (ctx_pg) {
9428 if (ctxm->instance_bmap)
9429 n = hweight32(ctxm->instance_bmap);
9430 for (i = 0; i < n; i++)
9431 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9432
9433 kfree(ctx_pg);
9434 ctxm->pg_info = NULL;
9435 ctxm->mem_valid = 0;
9436 }
9437 memset(ctxm, 0, sizeof(*ctxm));
9438 }
9439
9440 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9441 {
9442 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9443 u16 type;
9444
9445 if (!ctx)
9446 return;
9447
9448 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9449 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9450
9451 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9452 if (force) {
9453 kfree(ctx);
9454 bp->ctx = NULL;
9455 }
9456 }
9457
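/* Top-level context memory setup.  Query the firmware's backing store
 * requirements, then (on P5+ chips) size each region, adding headroom
 * for RoCE QPs, SRQs, CQs, MR/AV and timer entries when RoCE is
 * supported, and finally push the configuration back using the V2 or
 * legacy message as appropriate.
 */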
9458 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9459 {
9460 struct bnxt_ctx_mem_type *ctxm;
9461 struct bnxt_ctx_mem_info *ctx;
9462 u32 l2_qps, qp1_qps, max_qps;
9463 u32 ena, entries_sp, entries;
9464 u32 srqs, max_srqs, min;
9465 u32 num_mr, num_ah;
9466 u32 extra_srqs = 0;
9467 u32 extra_qps = 0;
9468 u32 fast_qpmd_qps;
9469 u8 pg_lvl = 1;
9470 int i, rc;
9471
9472 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9473 if (rc) {
9474 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9475 rc);
9476 return rc;
9477 }
9478 ctx = bp->ctx;
9479 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9480 return 0;
9481
9482 ena = 0;
9483 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9484 goto skip_legacy;
9485
9486 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9487 l2_qps = ctxm->qp_l2_entries;
9488 qp1_qps = ctxm->qp_qp1_entries;
9489 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9490 max_qps = ctxm->max_entries;
9491 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9492 srqs = ctxm->srq_l2_entries;
9493 max_srqs = ctxm->max_entries;
9494 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9495 pg_lvl = 2;
9496 if (BNXT_SW_RES_LMT(bp)) {
9497 extra_qps = max_qps - l2_qps - qp1_qps;
9498 extra_srqs = max_srqs - srqs;
9499 } else {
9500 extra_qps = min_t(u32, 65536,
9501 max_qps - l2_qps - qp1_qps);
9502 /* allocate extra qps if fw supports RoCE fast qp
9503 * destroy feature
9504 */
9505 extra_qps += fast_qpmd_qps;
9506 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9507 }
9508 if (fast_qpmd_qps)
9509 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9510 }
9511
9512 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9513 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9514 pg_lvl);
9515 if (rc)
9516 return rc;
9517
9518 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9519 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9520 if (rc)
9521 return rc;
9522
9523 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9524 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9525 extra_qps * 2, pg_lvl);
9526 if (rc)
9527 return rc;
9528
9529 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9530 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9531 if (rc)
9532 return rc;
9533
9534 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9535 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9536 if (rc)
9537 return rc;
9538
9539 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9540 goto skip_rdma;
9541
9542 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9543 if (BNXT_SW_RES_LMT(bp) &&
9544 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9545 num_ah = ctxm->mrav_av_entries;
9546 num_mr = ctxm->max_entries - num_ah;
9547 } else {
9548 /* 128K extra is needed to accommodate static AH context
9549 * allocation by f/w.
9550 */
9551 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9552 num_ah = min_t(u32, num_mr, 1024 * 128);
9553 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9554 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9555 ctxm->mrav_av_entries = num_ah;
9556 }
9557
9558 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9559 if (rc)
9560 return rc;
9561 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9562
9563 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9564 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9565 if (rc)
9566 return rc;
9567 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9568
9569 skip_rdma:
9570 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9571 min = ctxm->min_entries;
9572 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9573 2 * (extra_qps + qp1_qps) + min;
9574 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9575 if (rc)
9576 return rc;
9577
9578 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9579 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9580 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9581 if (rc)
9582 return rc;
9583 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9584 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9585 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9586
9587 skip_legacy:
9588 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9589 rc = bnxt_backing_store_cfg_v2(bp);
9590 else
9591 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9592 if (rc) {
9593 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9594 rc);
9595 return rc;
9596 }
9597 ctx->flags |= BNXT_CTX_FLAG_INITED;
9598 return 0;
9599 }
9600
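/* Tell the firmware where the host DDR crash dump buffer lives.  The
 * page size attribute is derived from BNXT_PAGE_SIZE: 8K and 64K
 * kernels are encoded explicitly, everything else is reported as 4K.
 */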
9601 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9602 {
9603 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9604 u16 page_attr;
9605 int rc;
9606
9607 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9608 return 0;
9609
9610 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9611 if (rc)
9612 return rc;
9613
9614 if (BNXT_PAGE_SIZE == 0x2000)
9615 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9616 else if (BNXT_PAGE_SIZE == 0x10000)
9617 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9618 else
9619 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9620 req->pg_size_lvl = cpu_to_le16(page_attr |
9621 bp->fw_crash_mem->ring_mem.depth);
9622 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9623 req->size = cpu_to_le32(bp->fw_crash_len);
9624 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9625 return hwrm_req_send(bp, req);
9626 }
9627
9628 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9629 {
9630 if (bp->fw_crash_mem) {
9631 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9632 kfree(bp->fw_crash_mem);
9633 bp->fw_crash_mem = NULL;
9634 }
9635 }
9636
9637 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9638 {
9639 u32 mem_size = 0;
9640 int rc;
9641
9642 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9643 return 0;
9644
9645 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9646 if (rc)
9647 return rc;
9648
9649 mem_size = round_up(mem_size, 4);
9650
9651 /* keep and use the existing pages */
9652 if (bp->fw_crash_mem &&
9653 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9654 goto alloc_done;
9655
9656 if (bp->fw_crash_mem)
9657 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9658 else
9659 bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9660 if (!bp->fw_crash_mem)
9661 return -ENOMEM;
9662
9663 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9664 if (rc) {
9665 bnxt_free_crash_dump_mem(bp);
9666 return rc;
9667 }
9668
9669 alloc_done:
9670 bp->fw_crash_len = mem_size;
9671 return 0;
9672 }
9673
9674 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9675 {
9676 struct hwrm_func_resource_qcaps_output *resp;
9677 struct hwrm_func_resource_qcaps_input *req;
9678 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9679 int rc;
9680
9681 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9682 if (rc)
9683 return rc;
9684
9685 req->fid = cpu_to_le16(0xffff);
9686 resp = hwrm_req_hold(bp, req);
9687 rc = hwrm_req_send_silent(bp, req);
9688 if (rc)
9689 goto hwrm_func_resc_qcaps_exit;
9690
9691 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9692 if (!all)
9693 goto hwrm_func_resc_qcaps_exit;
9694
9695 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9696 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9697 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9698 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9699 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9700 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9701 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9702 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9703 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9704 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9705 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9706 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9707 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9708 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9709 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9710 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9711
9712 if (hw_resc->max_rsscos_ctxs >=
9713 hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9714 bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9715
9716 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9717 u16 max_msix = le16_to_cpu(resp->max_msix);
9718
9719 hw_resc->max_nqs = max_msix;
9720 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9721 }
9722
9723 if (BNXT_PF(bp)) {
9724 struct bnxt_pf_info *pf = &bp->pf;
9725
9726 pf->vf_resv_strategy =
9727 le16_to_cpu(resp->vf_reservation_strategy);
9728 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9729 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9730 }
9731 hwrm_func_resc_qcaps_exit:
9732 hwrm_req_drop(bp, req);
9733 return rc;
9734 }
9735
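/* Query PTP configuration for the port.  PTP support requires HWRM
 * spec 1.8.1+ and a P5 or newer chip; on chips up to P5 the firmware
 * must also grant HWRM access to the PHC.  The clock registers are
 * taken from the response when reported, or from fixed P5 defaults.
 */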
9736 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9737 {
9738 struct hwrm_port_mac_ptp_qcfg_output *resp;
9739 struct hwrm_port_mac_ptp_qcfg_input *req;
9740 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9741 u8 flags;
9742 int rc;
9743
9744 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9745 rc = -ENODEV;
9746 goto no_ptp;
9747 }
9748
9749 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9750 if (rc)
9751 goto no_ptp;
9752
9753 req->port_id = cpu_to_le16(bp->pf.port_id);
9754 resp = hwrm_req_hold(bp, req);
9755 rc = hwrm_req_send(bp, req);
9756 if (rc)
9757 goto exit;
9758
9759 flags = resp->flags;
9760 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9761 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9762 rc = -ENODEV;
9763 goto exit;
9764 }
9765 if (!ptp) {
9766 ptp = kzalloc_obj(*ptp);
9767 if (!ptp) {
9768 rc = -ENOMEM;
9769 goto exit;
9770 }
9771 ptp->bp = bp;
9772 bp->ptp_cfg = ptp;
9773 }
9774
9775 if (flags &
9776 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9777 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9778 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9779 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9780 } else if (BNXT_CHIP_P5(bp)) {
9781 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9782 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9783 } else {
9784 rc = -ENODEV;
9785 goto exit;
9786 }
9787 ptp->rtc_configured =
9788 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9789 rc = bnxt_ptp_init(bp);
9790 if (rc)
9791 netdev_warn(bp->dev, "PTP initialization failed.\n");
9792 exit:
9793 hwrm_req_drop(bp, req);
9794 if (!rc)
9795 return 0;
9796
9797 no_ptp:
9798 bnxt_ptp_clear(bp);
9799 kfree(ptp);
9800 bp->ptp_cfg = NULL;
9801 return rc;
9802 }
9803
9804 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9805 {
9806 u32 flags, flags_ext, flags_ext2, flags_ext3;
9807 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9808 struct hwrm_func_qcaps_output *resp;
9809 struct hwrm_func_qcaps_input *req;
9810 int rc;
9811
9812 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9813 if (rc)
9814 return rc;
9815
9816 req->fid = cpu_to_le16(0xffff);
9817 resp = hwrm_req_hold(bp, req);
9818 rc = hwrm_req_send(bp, req);
9819 if (rc)
9820 goto hwrm_func_qcaps_exit;
9821
9822 flags = le32_to_cpu(resp->flags);
9823 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9824 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9825 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9826 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9827 if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9828 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9829 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9830 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9831 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9832 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9833 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9834 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9835 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9836 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9837 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9838 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9839 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9840 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9841 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9842 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9843
9844 flags_ext = le32_to_cpu(resp->flags_ext);
9845 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9846 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9847 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9848 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9849 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9850 bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9851 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9852 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9853 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9854 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9855 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9856 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9857 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9858 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9859 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9860 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9861 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9862 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9863 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9864 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9865
9866 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9867 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9868 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9869 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9870 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9871 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9872 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9873 if (flags_ext2 &
9874 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9875 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9876 if (BNXT_PF(bp) &&
9877 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9878 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9879
9880 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9881 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9882 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9883 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9884 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9885
9886 bp->tx_push_thresh = 0;
9887 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9888 BNXT_FW_MAJ(bp) > 217)
9889 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9890
9891 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9892 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9893 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9894 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9895 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9896 if (!hw_resc->max_hw_ring_grps)
9897 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9898 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9899 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9900 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9901
9902 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9903 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9904 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9905 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9906 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9907 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9908
9909 if (BNXT_PF(bp)) {
9910 struct bnxt_pf_info *pf = &bp->pf;
9911
9912 pf->fw_fid = le16_to_cpu(resp->fid);
9913 pf->port_id = le16_to_cpu(resp->port_id);
9914 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9915 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9916 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9917 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9918 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9919 bp->flags |= BNXT_FLAG_WOL_CAP;
9920 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9921 bp->fw_cap |= BNXT_FW_CAP_PTP;
9922 } else {
9923 bnxt_ptp_clear(bp);
9924 kfree(bp->ptp_cfg);
9925 bp->ptp_cfg = NULL;
9926 }
9927 } else {
9928 #ifdef CONFIG_BNXT_SRIOV
9929 struct bnxt_vf_info *vf = &bp->vf;
9930
9931 vf->fw_fid = le16_to_cpu(resp->fid);
9932 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9933 #endif
9934 }
9935 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9936
9937 hwrm_func_qcaps_exit:
9938 hwrm_req_drop(bp, req);
9939 return rc;
9940 }
9941
9942 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9943 {
9944 struct hwrm_dbg_qcaps_output *resp;
9945 struct hwrm_dbg_qcaps_input *req;
9946 int rc;
9947
9948 bp->fw_dbg_cap = 0;
9949 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9950 return;
9951
9952 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9953 if (rc)
9954 return;
9955
9956 req->fid = cpu_to_le16(0xffff);
9957 resp = hwrm_req_hold(bp, req);
9958 rc = hwrm_req_send(bp, req);
9959 if (rc)
9960 goto hwrm_dbg_qcaps_exit;
9961
9962 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9963
9964 hwrm_dbg_qcaps_exit:
9965 hwrm_req_drop(bp, req);
9966 }
9967
9968 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9969
9970 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9971 {
9972 int rc;
9973
9974 rc = __bnxt_hwrm_func_qcaps(bp);
9975 if (rc)
9976 return rc;
9977
9978 bnxt_hwrm_dbg_qcaps(bp);
9979
9980 rc = bnxt_hwrm_queue_qportcfg(bp);
9981 if (rc) {
9982 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9983 return rc;
9984 }
9985 if (bp->hwrm_spec_code >= 0x10803) {
9986 rc = bnxt_alloc_ctx_mem(bp);
9987 if (rc)
9988 return rc;
9989 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9990 if (!rc)
9991 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9992 }
9993 return 0;
9994 }
9995
9996 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9997 {
9998 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9999 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
10000 u32 flags;
10001 int rc;
10002
10003 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
10004 return 0;
10005
10006 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
10007 if (rc)
10008 return rc;
10009
10010 resp = hwrm_req_hold(bp, req);
10011 rc = hwrm_req_send(bp, req);
10012 if (rc)
10013 goto hwrm_cfa_adv_qcaps_exit;
10014
10015 flags = le32_to_cpu(resp->flags);
10016 if (flags &
10017 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
10018 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
10019
10020 if (flags &
10021 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
10022 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
10023
10024 if (flags &
10025 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
10026 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
10027
10028 hwrm_cfa_adv_qcaps_exit:
10029 hwrm_req_drop(bp, req);
10030 return rc;
10031 }
10032
10033 static int __bnxt_alloc_fw_health(struct bnxt *bp)
10034 {
10035 if (bp->fw_health)
10036 return 0;
10037
10038 bp->fw_health = kzalloc_obj(*bp->fw_health);
10039 if (!bp->fw_health)
10040 return -ENOMEM;
10041
10042 mutex_init(&bp->fw_health->lock);
10043 return 0;
10044 }
10045
10046 static int bnxt_alloc_fw_health(struct bnxt *bp)
10047 {
10048 int rc;
10049
10050 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10051 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10052 return 0;
10053
10054 rc = __bnxt_alloc_fw_health(bp);
10055 if (rc) {
10056 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10057 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10058 return rc;
10059 }
10060
10061 return 0;
10062 }
10063
10064 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
10065 {
10066 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
10067 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
10068 BNXT_FW_HEALTH_WIN_MAP_OFF);
10069 }
10070
10071 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
10072 {
10073 struct bnxt_fw_health *fw_health = bp->fw_health;
10074 u32 reg_type;
10075
10076 if (!fw_health)
10077 return;
10078
10079 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
10080 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
10081 fw_health->status_reliable = false;
10082
10083 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
10084 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
10085 fw_health->resets_reliable = false;
10086 }
10087
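/* Locate the firmware health status register.  The hcomm status
 * structure is found by mapping its well-known GRC location and
 * checking the signature; if the signature does not match, P5+ chips
 * fall back to a fixed status register accessed through BAR0.
 */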
10088 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
10089 {
10090 void __iomem *hs;
10091 u32 status_loc;
10092 u32 reg_type;
10093 u32 sig;
10094
10095 if (bp->fw_health)
10096 bp->fw_health->status_reliable = false;
10097
10098 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
10099 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10100
10101 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10102 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10103 if (!bp->chip_num) {
10104 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10105 bp->chip_num = readl(bp->bar0 +
10106 BNXT_FW_HEALTH_WIN_BASE +
10107 BNXT_GRC_REG_CHIP_NUM);
10108 }
10109 if (!BNXT_CHIP_P5_PLUS(bp))
10110 return;
10111
10112 status_loc = BNXT_GRC_REG_STATUS_P5 |
10113 BNXT_FW_HEALTH_REG_TYPE_BAR0;
10114 } else {
10115 status_loc = readl(hs + offsetof(struct hcomm_status,
10116 fw_status_loc));
10117 }
10118
10119 if (__bnxt_alloc_fw_health(bp)) {
10120 netdev_warn(bp->dev, "no memory for firmware status checks\n");
10121 return;
10122 }
10123
10124 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10125 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10126 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10127 __bnxt_map_fw_health_reg(bp, status_loc);
10128 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10129 BNXT_FW_HEALTH_WIN_OFF(status_loc);
10130 }
10131
10132 bp->fw_health->status_reliable = true;
10133 }
10134
10135 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10136 {
10137 struct bnxt_fw_health *fw_health = bp->fw_health;
10138 u32 reg_base = 0xffffffff;
10139 int i;
10140
10141 bp->fw_health->status_reliable = false;
10142 bp->fw_health->resets_reliable = false;
10143 /* Only pre-map the monitoring GRC registers using window 3 */
10144 for (i = 0; i < 4; i++) {
10145 u32 reg = fw_health->regs[i];
10146
10147 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10148 continue;
10149 if (reg_base == 0xffffffff)
10150 reg_base = reg & BNXT_GRC_BASE_MASK;
10151 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10152 return -ERANGE;
10153 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10154 }
10155 bp->fw_health->status_reliable = true;
10156 bp->fw_health->resets_reliable = true;
10157 if (reg_base == 0xffffffff)
10158 return 0;
10159
10160 __bnxt_map_fw_health_reg(bp, reg_base);
10161 return 0;
10162 }
10163
10164 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10165 {
10166 if (!bp->fw_health)
10167 return;
10168
10169 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10170 bp->fw_health->status_reliable = true;
10171 bp->fw_health->resets_reliable = true;
10172 } else {
10173 bnxt_try_map_fw_health_reg(bp);
10174 }
10175 }
10176
10177 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10178 {
10179 struct bnxt_fw_health *fw_health = bp->fw_health;
10180 struct hwrm_error_recovery_qcfg_output *resp;
10181 struct hwrm_error_recovery_qcfg_input *req;
10182 int rc, i;
10183
10184 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10185 return 0;
10186
10187 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10188 if (rc)
10189 return rc;
10190
10191 resp = hwrm_req_hold(bp, req);
10192 rc = hwrm_req_send(bp, req);
10193 if (rc)
10194 goto err_recovery_out;
10195 fw_health->flags = le32_to_cpu(resp->flags);
10196 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10197 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10198 rc = -EINVAL;
10199 goto err_recovery_out;
10200 }
10201 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10202 fw_health->master_func_wait_dsecs =
10203 le32_to_cpu(resp->master_func_wait_period);
10204 fw_health->normal_func_wait_dsecs =
10205 le32_to_cpu(resp->normal_func_wait_period);
10206 fw_health->post_reset_wait_dsecs =
10207 le32_to_cpu(resp->master_func_wait_period_after_reset);
10208 fw_health->post_reset_max_wait_dsecs =
10209 le32_to_cpu(resp->max_bailout_time_after_reset);
10210 fw_health->regs[BNXT_FW_HEALTH_REG] =
10211 le32_to_cpu(resp->fw_health_status_reg);
10212 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10213 le32_to_cpu(resp->fw_heartbeat_reg);
10214 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10215 le32_to_cpu(resp->fw_reset_cnt_reg);
10216 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10217 le32_to_cpu(resp->reset_inprogress_reg);
10218 fw_health->fw_reset_inprog_reg_mask =
10219 le32_to_cpu(resp->reset_inprogress_reg_mask);
10220 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10221 if (fw_health->fw_reset_seq_cnt >= 16) {
10222 rc = -EINVAL;
10223 goto err_recovery_out;
10224 }
10225 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10226 fw_health->fw_reset_seq_regs[i] =
10227 le32_to_cpu(resp->reset_reg[i]);
10228 fw_health->fw_reset_seq_vals[i] =
10229 le32_to_cpu(resp->reset_reg_val[i]);
10230 fw_health->fw_reset_seq_delay_msec[i] =
10231 resp->delay_after_reset[i];
10232 }
10233 err_recovery_out:
10234 hwrm_req_drop(bp, req);
10235 if (!rc)
10236 rc = bnxt_map_fw_health_regs(bp);
10237 if (rc)
10238 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10239 return rc;
10240 }
10241
10242 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10243 {
10244 struct hwrm_func_reset_input *req;
10245 int rc;
10246
10247 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10248 if (rc)
10249 return rc;
10250
10251 req->enables = 0;
10252 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10253 return hwrm_req_send(bp, req);
10254 }
10255
10256 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10257 {
10258 struct hwrm_nvm_get_dev_info_output nvm_info;
10259
10260 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10261 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10262 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10263 nvm_info.nvm_cfg_ver_upd);
10264 }
10265
10266 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10267 {
10268 struct hwrm_queue_qportcfg_output *resp;
10269 struct hwrm_queue_qportcfg_input *req;
10270 u8 i, j, *qptr;
10271 bool no_rdma;
10272 int rc = 0;
10273
10274 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10275 if (rc)
10276 return rc;
10277
10278 resp = hwrm_req_hold(bp, req);
10279 rc = hwrm_req_send(bp, req);
10280 if (rc)
10281 goto qportcfg_exit;
10282
10283 if (!resp->max_configurable_queues) {
10284 rc = -EINVAL;
10285 goto qportcfg_exit;
10286 }
10287 bp->max_tc = resp->max_configurable_queues;
10288 bp->max_lltc = resp->max_configurable_lossless_queues;
10289 if (bp->max_tc > BNXT_MAX_QUEUE)
10290 bp->max_tc = BNXT_MAX_QUEUE;
10291
10292 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10293 qptr = &resp->queue_id0;
10294 for (i = 0, j = 0; i < bp->max_tc; i++) {
10295 bp->q_info[j].queue_id = *qptr;
10296 bp->q_ids[i] = *qptr++;
10297 bp->q_info[j].queue_profile = *qptr++;
10298 bp->tc_to_qidx[j] = j;
10299 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10300 (no_rdma && BNXT_PF(bp)))
10301 j++;
10302 }
10303 bp->max_q = bp->max_tc;
10304 bp->max_tc = max_t(u8, j, 1);
10305
10306 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10307 bp->max_tc = 1;
10308
10309 if (bp->max_lltc > bp->max_tc)
10310 bp->max_lltc = bp->max_tc;
10311
10312 qportcfg_exit:
10313 hwrm_req_drop(bp, req);
10314 return rc;
10315 }
10316
10317 static int bnxt_hwrm_poll(struct bnxt *bp)
10318 {
10319 struct hwrm_ver_get_input *req;
10320 int rc;
10321
10322 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10323 if (rc)
10324 return rc;
10325
10326 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10327 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10328 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10329
10330 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10331 rc = hwrm_req_send(bp, req);
10332 return rc;
10333 }
10334
10335 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10336 {
10337 struct hwrm_ver_get_output *resp;
10338 struct hwrm_ver_get_input *req;
10339 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10340 u32 dev_caps_cfg, hwrm_ver;
10341 int rc, len, max_tmo_secs;
10342
10343 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10344 if (rc)
10345 return rc;
10346
10347 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10348 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10349 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10350 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10351 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10352
10353 resp = hwrm_req_hold(bp, req);
10354 rc = hwrm_req_send(bp, req);
10355 if (rc)
10356 goto hwrm_ver_get_exit;
10357
10358 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10359
10360 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10361 resp->hwrm_intf_min_8b << 8 |
10362 resp->hwrm_intf_upd_8b;
10363 if (resp->hwrm_intf_maj_8b < 1) {
10364 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10365 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10366 resp->hwrm_intf_upd_8b);
10367 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10368 }
10369
10370 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10371 HWRM_VERSION_UPDATE;
10372
10373 if (bp->hwrm_spec_code > hwrm_ver)
10374 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10375 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10376 HWRM_VERSION_UPDATE);
10377 else
10378 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10379 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10380 resp->hwrm_intf_upd_8b);
10381
10382 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10383 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10384 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10385 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10386 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10387 len = FW_VER_STR_LEN;
10388 } else {
10389 fw_maj = resp->hwrm_fw_maj_8b;
10390 fw_min = resp->hwrm_fw_min_8b;
10391 fw_bld = resp->hwrm_fw_bld_8b;
10392 fw_rsv = resp->hwrm_fw_rsvd_8b;
10393 len = BC_HWRM_STR_LEN;
10394 }
10395 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10396 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10397 fw_rsv);
10398
10399 if (strlen(resp->active_pkg_name)) {
10400 int fw_ver_len = strlen(bp->fw_ver_str);
10401
10402 snprintf(bp->fw_ver_str + fw_ver_len,
10403 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10404 resp->active_pkg_name);
10405 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10406 }
10407
10408 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10409 if (!bp->hwrm_cmd_timeout)
10410 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10411 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10412 if (!bp->hwrm_cmd_max_timeout)
10413 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10414 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10415 #ifdef CONFIG_DETECT_HUNG_TASK
10416 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10417 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10418 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10419 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10420 }
10421 #endif
10422
10423 if (resp->hwrm_intf_maj_8b >= 1) {
10424 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10425 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10426 }
10427 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10428 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10429
10430 bp->chip_num = le16_to_cpu(resp->chip_num);
10431 bp->chip_rev = resp->chip_rev;
10432 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10433 !resp->chip_metal)
10434 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10435
10436 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10437 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10438 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10439 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10440
10441 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10442 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10443
10444 if (dev_caps_cfg &
10445 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10446 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10447
10448 if (dev_caps_cfg &
10449 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10450 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10451
10452 if (dev_caps_cfg &
10453 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10454 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10455
10456 hwrm_ver_get_exit:
10457 hwrm_req_drop(bp, req);
10458 return rc;
10459 }
10460
10461 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10462 {
10463 struct hwrm_fw_set_time_input *req;
10464 struct tm tm;
10465 time64_t now = ktime_get_real_seconds();
10466 int rc;
10467
10468 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10469 bp->hwrm_spec_code < 0x10400)
10470 return -EOPNOTSUPP;
10471
10472 time64_to_tm(now, 0, &tm);
10473 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10474 if (rc)
10475 return rc;
10476
10477 req->year = cpu_to_le16(1900 + tm.tm_year);
10478 req->month = 1 + tm.tm_mon;
10479 req->day = tm.tm_mday;
10480 req->hour = tm.tm_hour;
10481 req->minute = tm.tm_min;
10482 req->second = tm.tm_sec;
10483 return hwrm_req_send(bp, req);
10484 }
10485
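/* Rollover-safe accumulation of a hardware counter that is narrower than
 * 64 bits into a 64-bit software counter.  @mask covers the valid bits of
 * the hardware counter; a wrap is detected when the new hardware value is
 * smaller than the masked low bits of the software counter, in which case
 * one full period (mask + 1) is added.  Illustrative example with a
 * 16-bit mask: *sw = 0x1fffe, hw = 0x3 -> sw_tmp = 0x10003; wrap detected
 * (0x3 < 0xfffe), so the result is 0x10003 + 0x10000 = 0x20003.
 */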
10486 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10487 {
10488 u64 sw_tmp;
10489
10490 hw &= mask;
10491 sw_tmp = (*sw & ~mask) | hw;
10492 if (hw < (*sw & mask))
10493 sw_tmp += mask + 1;
10494 WRITE_ONCE(*sw, sw_tmp);
10495 }
10496
10497 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10498 int count, bool ignore_zero)
10499 {
10500 int i;
10501
10502 for (i = 0; i < count; i++) {
10503 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10504
10505 if (ignore_zero && !hw)
10506 continue;
10507
10508 if (masks[i] == -1ULL)
10509 sw_stats[i] = hw;
10510 else
10511 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10512 }
10513 }
10514
10515 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10516 {
10517 if (!stats->hw_stats)
10518 return;
10519
10520 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10521 stats->hw_masks, stats->len / 8, false);
10522 }
10523
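/* Accumulate the per-ring completion ring counters.  All rings share the
 * same counter layout, so the counter-width masks recorded for ring 0
 * (ring0_stats->hw_masks) are reused for every ring.  On P5+ chips a raw
 * value of 0 is skipped via ignore_zero (see the chip bug note below).
 */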
10524 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10525 {
10526 struct bnxt_stats_mem *ring0_stats;
10527 bool ignore_zero = false;
10528 int i;
10529
10530 /* Chip bug. Counter intermittently becomes 0. */
10531 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10532 ignore_zero = true;
10533
10534 for (i = 0; i < bp->cp_nr_rings; i++) {
10535 struct bnxt_napi *bnapi = bp->bnapi[i];
10536 struct bnxt_cp_ring_info *cpr;
10537 struct bnxt_stats_mem *stats;
10538
10539 cpr = &bnapi->cp_ring;
10540 stats = &cpr->stats;
10541 if (!i)
10542 ring0_stats = stats;
10543 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10544 ring0_stats->hw_masks,
10545 ring0_stats->len / 8, ignore_zero);
10546 }
10547 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10548 struct bnxt_stats_mem *stats = &bp->port_stats;
10549 __le64 *hw_stats = stats->hw_stats;
10550 u64 *sw_stats = stats->sw_stats;
10551 u64 *masks = stats->hw_masks;
10552 int cnt;
10553
10554 cnt = sizeof(struct rx_port_stats) / 8;
10555 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10556
10557 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10558 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10559 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10560 cnt = sizeof(struct tx_port_stats) / 8;
10561 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10562 }
10563 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10564 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10565 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10566 }
10567 }
10568
10569 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10570 {
10571 struct hwrm_port_qstats_input *req;
10572 struct bnxt_pf_info *pf = &bp->pf;
10573 int rc;
10574
10575 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10576 return 0;
10577
10578 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10579 return -EOPNOTSUPP;
10580
10581 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10582 if (rc)
10583 return rc;
10584
10585 req->flags = flags;
10586 req->port_id = cpu_to_le16(pf->port_id);
10587 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10588 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10589 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10590 return hwrm_req_send(bp, req);
10591 }
10592
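/* Query extended port statistics.  This is a two-step exchange: first
 * HWRM_PORT_QSTATS_EXT DMAs the extended RX/TX counters and reports how
 * many of them the firmware actually filled in, then
 * HWRM_QUEUE_PRI2COS_QCFG rebuilds the priority -> CoS queue index map
 * used for the per-priority counters.
 */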
10593 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10594 {
10595 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10596 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10597 struct hwrm_port_qstats_ext_output *resp_qs;
10598 struct hwrm_port_qstats_ext_input *req_qs;
10599 struct bnxt_pf_info *pf = &bp->pf;
10600 u32 tx_stat_size;
10601 int rc;
10602
10603 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10604 return 0;
10605
10606 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10607 return -EOPNOTSUPP;
10608
10609 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10610 if (rc)
10611 return rc;
10612
10613 req_qs->flags = flags;
10614 req_qs->port_id = cpu_to_le16(pf->port_id);
10615 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10616 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10617 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10618 sizeof(struct tx_port_stats_ext) : 0;
10619 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10620 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10621 resp_qs = hwrm_req_hold(bp, req_qs);
10622 rc = hwrm_req_send(bp, req_qs);
10623 if (!rc) {
10624 bp->fw_rx_stats_ext_size =
10625 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10626 if (BNXT_FW_MAJ(bp) < 220 &&
10627 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10628 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10629
10630 bp->fw_tx_stats_ext_size = tx_stat_size ?
10631 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10632 } else {
10633 bp->fw_rx_stats_ext_size = 0;
10634 bp->fw_tx_stats_ext_size = 0;
10635 }
10636 hwrm_req_drop(bp, req_qs);
10637
10638 if (flags)
10639 return rc;
10640
10641 if (bp->fw_tx_stats_ext_size <=
10642 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10643 bp->pri2cos_valid = 0;
10644 return rc;
10645 }
10646
10647 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10648 if (rc)
10649 return rc;
10650
10651 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10652
10653 resp_qc = hwrm_req_hold(bp, req_qc);
10654 rc = hwrm_req_send(bp, req_qc);
10655 if (!rc) {
10656 u8 *pri2cos;
10657 int i, j;
10658
10659 pri2cos = &resp_qc->pri0_cos_queue_id;
10660 for (i = 0; i < 8; i++) {
10661 u8 queue_id = pri2cos[i];
10662 u8 queue_idx;
10663
10664 /* Per port queue IDs start from 0, 10, 20, etc */
10665 queue_idx = queue_id % 10;
10666 if (queue_idx > BNXT_MAX_QUEUE) {
10667 bp->pri2cos_valid = false;
10668 hwrm_req_drop(bp, req_qc);
10669 return rc;
10670 }
10671 for (j = 0; j < bp->max_q; j++) {
10672 if (bp->q_ids[j] == queue_id)
10673 bp->pri2cos_idx[i] = queue_idx;
10674 }
10675 }
10676 bp->pri2cos_valid = true;
10677 }
10678 hwrm_req_drop(bp, req_qc);
10679
10680 return rc;
10681 }
10682
10683 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10684 {
10685 bnxt_hwrm_tunnel_dst_port_free(bp,
10686 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10687 bnxt_hwrm_tunnel_dst_port_free(bp,
10688 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10689 }
10690
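/* Apply or clear the TPA (hardware aggregation) setting on every VNIC.
 * When clearing during teardown, skip the firmware calls entirely if the
 * firmware is no longer reachable.
 */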
10691 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10692 {
10693 int rc, i;
10694 u32 tpa_flags = 0;
10695
10696 if (set_tpa)
10697 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10698 else if (BNXT_NO_FW_ACCESS(bp))
10699 return 0;
10700 for (i = 0; i < bp->nr_vnics; i++) {
10701 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10702 if (rc) {
10703 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10704 i, rc);
10705 return rc;
10706 }
10707 }
10708 return 0;
10709 }
10710
10711 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10712 {
10713 int i;
10714
10715 for (i = 0; i < bp->nr_vnics; i++)
10716 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10717 }
10718
10719 static void bnxt_clear_vnic(struct bnxt *bp)
10720 {
10721 if (!bp->vnic_info)
10722 return;
10723
10724 bnxt_hwrm_clear_vnic_filter(bp);
10725 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10726 /* clear all RSS settings before freeing the vnic ctx */
10727 bnxt_hwrm_clear_vnic_rss(bp);
10728 bnxt_hwrm_vnic_ctx_free(bp);
10729 }
10730 /* before freeing the vnic, undo the vnic tpa settings */
10731 if (bp->flags & BNXT_FLAG_TPA)
10732 bnxt_set_tpa(bp, false);
10733 bnxt_hwrm_vnic_free(bp);
10734 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10735 bnxt_hwrm_vnic_ctx_free(bp);
10736 }
10737
10738 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10739 bool irq_re_init)
10740 {
10741 bnxt_clear_vnic(bp);
10742 bnxt_hwrm_ring_free(bp, close_path);
10743 bnxt_hwrm_ring_grp_free(bp);
10744 if (irq_re_init) {
10745 bnxt_hwrm_stat_ctx_free(bp);
10746 bnxt_hwrm_free_tunnel_ports(bp);
10747 }
10748 }
10749
10750 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10751 {
10752 struct hwrm_func_cfg_input *req;
10753 u8 evb_mode;
10754 int rc;
10755
10756 if (br_mode == BRIDGE_MODE_VEB)
10757 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10758 else if (br_mode == BRIDGE_MODE_VEPA)
10759 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10760 else
10761 return -EINVAL;
10762
10763 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10764 if (rc)
10765 return rc;
10766
10767 req->fid = cpu_to_le16(0xffff);
10768 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10769 req->evb_mode = evb_mode;
10770 return hwrm_req_send(bp, req);
10771 }
10772
10773 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10774 {
10775 struct hwrm_func_cfg_input *req;
10776 int rc;
10777
10778 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10779 return 0;
10780
10781 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10782 if (rc)
10783 return rc;
10784
10785 req->fid = cpu_to_le16(0xffff);
10786 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10787 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10788 if (size == 128)
10789 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10790
10791 return hwrm_req_send(bp, req);
10792 }
10793
10794 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10795 {
10796 int rc;
10797
10798 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10799 goto skip_rss_ctx;
10800
10801 /* allocate context for vnic */
10802 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10803 if (rc) {
10804 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10805 vnic->vnic_id, rc);
10806 goto vnic_setup_err;
10807 }
10808 bp->rsscos_nr_ctxs++;
10809
10810 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10811 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10812 if (rc) {
10813 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10814 vnic->vnic_id, rc);
10815 goto vnic_setup_err;
10816 }
10817 bp->rsscos_nr_ctxs++;
10818 }
10819
10820 skip_rss_ctx:
10821 /* configure default vnic, ring grp */
10822 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10823 if (rc) {
10824 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10825 vnic->vnic_id, rc);
10826 goto vnic_setup_err;
10827 }
10828
10829 /* Enable RSS hashing on vnic */
10830 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10831 if (rc) {
10832 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10833 vnic->vnic_id, rc);
10834 goto vnic_setup_err;
10835 }
10836
10837 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10838 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10839 if (rc) {
10840 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10841 vnic->vnic_id, rc);
10842 }
10843 }
10844
10845 vnic_setup_err:
10846 return rc;
10847 }
10848
10849 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10850 u8 valid)
10851 {
10852 struct hwrm_vnic_update_input *req;
10853 int rc;
10854
10855 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10856 if (rc)
10857 return rc;
10858
10859 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10860
10861 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10862 req->mru = cpu_to_le16(vnic->mru);
10863
10864 req->enables = cpu_to_le32(valid);
10865
10866 return hwrm_req_send(bp, req);
10867 }
10868
10869 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10870 {
10871 int rc;
10872
10873 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10874 if (rc) {
10875 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10876 vnic->vnic_id, rc);
10877 return rc;
10878 }
10879 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10880 if (rc)
10881 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10882 vnic->vnic_id, rc);
10883 return rc;
10884 }
10885
10886 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10887 {
10888 int rc, i, nr_ctxs;
10889
10890 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10891 for (i = 0; i < nr_ctxs; i++) {
10892 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10893 if (rc) {
10894 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10895 vnic->vnic_id, i, rc);
10896 break;
10897 }
10898 bp->rsscos_nr_ctxs++;
10899 }
10900 if (i < nr_ctxs)
10901 return -ENOMEM;
10902
10903 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10904 if (rc)
10905 return rc;
10906
10907 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10908 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10909 if (rc) {
10910 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10911 vnic->vnic_id, rc);
10912 }
10913 }
10914 return rc;
10915 }
10916
10917 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10918 {
10919 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10920 return __bnxt_setup_vnic_p5(bp, vnic);
10921 else
10922 return __bnxt_setup_vnic(bp, vnic);
10923 }
10924
10925 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10926 struct bnxt_vnic_info *vnic,
10927 u16 start_rx_ring_idx, int rx_rings)
10928 {
10929 int rc;
10930
10931 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10932 if (rc) {
10933 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10934 vnic->vnic_id, rc);
10935 return rc;
10936 }
10937 return bnxt_setup_vnic(bp, vnic);
10938 }
10939
10940 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10941 {
10942 struct bnxt_vnic_info *vnic;
10943 int i, rc = 0;
10944
10945 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10946 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10947 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10948 }
10949
10950 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10951 return 0;
10952
10953 for (i = 0; i < bp->rx_nr_rings; i++) {
10954 u16 vnic_id = i + 1;
10955 u16 ring_id = i;
10956
10957 if (vnic_id >= bp->nr_vnics)
10958 break;
10959
10960 vnic = &bp->vnic_info[vnic_id];
10961 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10962 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10963 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10964 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10965 break;
10966 }
10967 return rc;
10968 }
10969
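/* Tear down one RSS context: free its VNIC and RSS contexts in firmware.
 * When @all is true, also drop any user ntuple filters that steer to this
 * context and release the DMA'ed RSS indirection table.
 */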
10970 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10971 bool all)
10972 {
10973 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10974 struct bnxt_filter_base *usr_fltr, *tmp;
10975 struct bnxt_ntuple_filter *ntp_fltr;
10976 int i;
10977
10978 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10979 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10980 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10981 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10982 }
10983 if (!all)
10984 return;
10985
10986 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10987 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10988 usr_fltr->fw_vnic_id == rss_ctx->index) {
10989 ntp_fltr = container_of(usr_fltr,
10990 struct bnxt_ntuple_filter,
10991 base);
10992 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10993 bnxt_del_ntp_filter(bp, ntp_fltr);
10994 bnxt_del_one_usr_fltr(bp, usr_fltr);
10995 }
10996 }
10997
10998 if (vnic->rss_table)
10999 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
11000 vnic->rss_table,
11001 vnic->rss_table_dma_addr);
11002 bp->num_rss_ctx--;
11003 }
11004
11005 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
11006 int rxr_id)
11007 {
11008 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
11009 int i, vnic_rx;
11010
11011 /* The ntuple VNIC always covers all the RX rings, so any RX ring id
11012 * change affects it: a future filter may use that ring.
11013 */
11014 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
11015 return true;
11016
11017 for (i = 0; i < tbl_size; i++) {
11018 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
11019 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
11020 else
11021 vnic_rx = bp->rss_indir_tbl[i];
11022
11023 if (rxr_id == vnic_rx)
11024 return true;
11025 }
11026
11027 return false;
11028 }
11029
11030 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
11031 u16 mru, int rxr_id)
11032 {
11033 int rc;
11034
11035 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
11036 return 0;
11037
11038 if (mru) {
11039 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
11040 if (rc) {
11041 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
11042 vnic->vnic_id, rc);
11043 return rc;
11044 }
11045 }
11046 vnic->mru = mru;
11047 bnxt_hwrm_vnic_update(bp, vnic,
11048 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
11049
11050 return 0;
11051 }
11052
11053 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
11054 {
11055 struct ethtool_rxfh_context *ctx;
11056 unsigned long context;
11057 int rc;
11058
11059 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11060 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11061 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
11062
11063 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
11064 if (rc)
11065 return rc;
11066 }
11067
11068 return 0;
11069 }
11070
11071 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
11072 {
11073 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
11074 struct ethtool_rxfh_context *ctx;
11075 unsigned long context;
11076
11077 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11078 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11079 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
11080
11081 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
11082 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
11083 __bnxt_setup_vnic_p5(bp, vnic)) {
11084 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
11085 rss_ctx->index);
11086 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
11087 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
11088 }
11089 }
11090 }
11091
11092 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
11093 {
11094 struct ethtool_rxfh_context *ctx;
11095 unsigned long context;
11096
11097 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11098 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11099
11100 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11101 }
11102 }
11103
11104 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11105 static bool bnxt_promisc_ok(struct bnxt *bp)
11106 {
11107 #ifdef CONFIG_BNXT_SRIOV
11108 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11109 return false;
11110 #endif
11111 return true;
11112 }
11113
11114 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11115 {
11116 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11117 unsigned int rc = 0;
11118
11119 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11120 if (rc) {
11121 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11122 rc);
11123 return rc;
11124 }
11125
11126 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11127 if (rc) {
11128 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11129 rc);
11130 return rc;
11131 }
11132 return rc;
11133 }
11134
11135 static int bnxt_cfg_rx_mode(struct bnxt *, struct netdev_hw_addr_list *, bool);
11136 static bool bnxt_mc_list_updated(struct bnxt *, u32 *,
11137 const struct netdev_hw_addr_list *);
11138
11139 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11140 {
11141 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11142 int rc = 0;
11143 unsigned int rx_nr_rings = bp->rx_nr_rings;
11144
11145 if (irq_re_init) {
11146 rc = bnxt_hwrm_stat_ctx_alloc(bp);
11147 if (rc) {
11148 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11149 rc);
11150 goto err_out;
11151 }
11152 }
11153
11154 rc = bnxt_hwrm_ring_alloc(bp);
11155 if (rc) {
11156 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11157 goto err_out;
11158 }
11159
11160 rc = bnxt_hwrm_ring_grp_alloc(bp);
11161 if (rc) {
11162 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11163 goto err_out;
11164 }
11165
11166 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11167 rx_nr_rings--;
11168
11169 /* default vnic 0 */
11170 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11171 if (rc) {
11172 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11173 goto err_out;
11174 }
11175
11176 if (BNXT_VF(bp))
11177 bnxt_hwrm_func_qcfg(bp);
11178
11179 rc = bnxt_setup_vnic(bp, vnic);
11180 if (rc)
11181 goto err_out;
11182 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11183 bnxt_hwrm_update_rss_hash_cfg(bp);
11184
11185 if (bp->flags & BNXT_FLAG_RFS) {
11186 rc = bnxt_alloc_rfs_vnics(bp);
11187 if (rc)
11188 goto err_out;
11189 }
11190
11191 if (bp->flags & BNXT_FLAG_TPA) {
11192 rc = bnxt_set_tpa(bp, true);
11193 if (rc)
11194 goto err_out;
11195 }
11196
11197 if (BNXT_VF(bp))
11198 bnxt_update_vf_mac(bp);
11199
11200 /* Filter for default vnic 0 */
11201 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11202 if (rc) {
11203 if (BNXT_VF(bp) && rc == -ENODEV)
11204 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11205 else
11206 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11207 goto err_out;
11208 }
11209 vnic->uc_filter_count = 1;
11210
11211 vnic->rx_mask = 0;
11212 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11213 goto skip_rx_mask;
11214
11215 if (bp->dev->flags & IFF_BROADCAST)
11216 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11217
11218 if (bp->dev->flags & IFF_PROMISC)
11219 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11220
11221 if (bp->dev->flags & IFF_ALLMULTI) {
11222 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11223 vnic->mc_list_count = 0;
11224 } else if (bp->dev->flags & IFF_MULTICAST) {
11225 u32 mask = 0;
11226
11227 bnxt_mc_list_updated(bp, &mask, &bp->dev->mc);
11228 vnic->rx_mask |= mask;
11229 }
11230
11231 rc = bnxt_cfg_rx_mode(bp, &bp->dev->uc, true);
11232 if (rc)
11233 goto err_out;
11234
11235 skip_rx_mask:
11236 rc = bnxt_hwrm_set_coal(bp);
11237 if (rc)
11238 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11239 rc);
11240
11241 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11242 rc = bnxt_setup_nitroa0_vnic(bp);
11243 if (rc)
11244 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11245 rc);
11246 }
11247
11248 if (BNXT_VF(bp)) {
11249 bnxt_hwrm_func_qcfg(bp);
11250 netdev_update_features(bp->dev);
11251 }
11252
11253 return 0;
11254
11255 err_out:
11256 bnxt_hwrm_resource_free(bp, 0, true);
11257
11258 return rc;
11259 }
11260
11261 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11262 {
11263 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11264 return 0;
11265 }
11266
11267 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11268 {
11269 bnxt_init_cp_rings(bp);
11270 bnxt_init_rx_rings(bp);
11271 bnxt_init_tx_rings(bp);
11272 bnxt_init_ring_grps(bp, irq_re_init);
11273 bnxt_init_vnics(bp);
11274
11275 return bnxt_init_chip(bp, irq_re_init);
11276 }
11277
11278 static int bnxt_set_real_num_queues(struct bnxt *bp)
11279 {
11280 int rc;
11281 struct net_device *dev = bp->dev;
11282
11283 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11284 bp->tx_nr_rings_xdp);
11285 if (rc)
11286 return rc;
11287
11288 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11289 if (rc)
11290 return rc;
11291
11292 #ifdef CONFIG_RFS_ACCEL
11293 if (bp->flags & BNXT_FLAG_RFS)
11294 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11295 #endif
11296
11297 return rc;
11298 }
11299
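/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared completion rings each count is simply clamped to @max;
 * otherwise RX and TX are reduced alternately (largest first) until
 * rx + tx <= max.  Illustrative example: rx = 8, tx = 8, max = 10,
 * not shared -> trimmed to rx = 5, tx = 5.
 */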
11300 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11301 bool shared)
11302 {
11303 int _rx = *rx, _tx = *tx;
11304
11305 if (shared) {
11306 *rx = min_t(int, _rx, max);
11307 *tx = min_t(int, _tx, max);
11308 } else {
11309 if (max < 2)
11310 return -ENOMEM;
11311
11312 while (_rx + _tx > max) {
11313 if (_rx > _tx && _rx > 1)
11314 _rx--;
11315 else if (_tx > 1)
11316 _tx--;
11317 }
11318 *rx = _rx;
11319 *tx = _tx;
11320 }
11321 return 0;
11322 }
11323
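/* Convert a total TX ring count to the number of completion rings it
 * consumes: TX rings at the same index across TCs map to one completion
 * ring, while each XDP TX ring has its own.  Illustrative example: 2 TCs
 * with 8 TX rings each plus 4 XDP rings gives tx = 20, tx_sets = 2,
 * tx_xdp = 4 -> (20 - 4) / 2 + 4 = 12 completion rings.
 */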
11324 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11325 {
11326 return (tx - tx_xdp) / tx_sets + tx_xdp;
11327 }
11328
11329 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11330 {
11331 int tcs = bp->num_tc;
11332
11333 if (!tcs)
11334 tcs = 1;
11335 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11336 }
11337
11338 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11339 {
11340 int tcs = bp->num_tc;
11341
11342 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11343 bp->tx_nr_rings_xdp;
11344 }
11345
11346 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11347 bool sh)
11348 {
11349 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11350
11351 if (tx_cp != *tx) {
11352 int tx_saved = tx_cp, rc;
11353
11354 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11355 if (rc)
11356 return rc;
11357 if (tx_cp != tx_saved)
11358 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11359 return 0;
11360 }
11361 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11362 }
11363
11364 static void bnxt_setup_msix(struct bnxt *bp)
11365 {
11366 const int len = sizeof(bp->irq_tbl[0].name);
11367 struct net_device *dev = bp->dev;
11368 int tcs, i;
11369
11370 tcs = bp->num_tc;
11371 if (tcs) {
11372 int i, off, count;
11373
11374 for (i = 0; i < tcs; i++) {
11375 count = bp->tx_nr_rings_per_tc;
11376 off = BNXT_TC_TO_RING_BASE(bp, i);
11377 netdev_set_tc_queue(dev, i, count, off);
11378 }
11379 }
11380
11381 for (i = 0; i < bp->cp_nr_rings; i++) {
11382 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11383 char *attr;
11384
11385 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11386 attr = "TxRx";
11387 else if (i < bp->rx_nr_rings)
11388 attr = "rx";
11389 else
11390 attr = "tx";
11391
11392 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11393 attr, i);
11394 bp->irq_tbl[map_idx].handler = bnxt_msix;
11395 }
11396 }
11397
11398 static int bnxt_init_int_mode(struct bnxt *bp);
11399
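/* Grow or shrink the MSI-X allocation to @total vectors using dynamic
 * per-vector allocation: new vectors are appended at the end and surplus
 * vectors are freed from the end.  Returns the number of vectors actually
 * allocated, which may be less than @total if allocation fails part way.
 */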
11400 static int bnxt_change_msix(struct bnxt *bp, int total)
11401 {
11402 struct msi_map map;
11403 int i;
11404
11405 /* add MSIX to the end if needed */
11406 for (i = bp->total_irqs; i < total; i++) {
11407 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11408 if (map.index < 0)
11409 return bp->total_irqs;
11410 bp->irq_tbl[i].vector = map.virq;
11411 bp->total_irqs++;
11412 }
11413
11414 /* trim MSIX from the end if needed */
11415 for (i = bp->total_irqs; i > total; i--) {
11416 map.index = i - 1;
11417 map.virq = bp->irq_tbl[i - 1].vector;
11418 pci_msix_free_irq(bp->pdev, map);
11419 bp->total_irqs--;
11420 }
11421 return bp->total_irqs;
11422 }
11423
11424 static int bnxt_setup_int_mode(struct bnxt *bp)
11425 {
11426 int rc;
11427
11428 if (!bp->irq_tbl) {
11429 rc = bnxt_init_int_mode(bp);
11430 if (rc || !bp->irq_tbl)
11431 return rc ?: -ENODEV;
11432 }
11433
11434 bnxt_setup_msix(bp);
11435
11436 rc = bnxt_set_real_num_queues(bp);
11437 return rc;
11438 }
11439
11440 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11441 {
11442 return bp->hw_resc.max_rsscos_ctxs;
11443 }
11444
11445 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11446 {
11447 return bp->hw_resc.max_vnics;
11448 }
11449
11450 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11451 {
11452 return bp->hw_resc.max_stat_ctxs;
11453 }
11454
11455 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11456 {
11457 return bp->hw_resc.max_cp_rings;
11458 }
11459
11460 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11461 {
11462 unsigned int cp = bp->hw_resc.max_cp_rings;
11463
11464 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11465 cp -= bnxt_get_ulp_msix_num(bp);
11466
11467 return cp;
11468 }
11469
11470 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11471 {
11472 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11473
11474 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11475 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11476
11477 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11478 }
11479
11480 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11481 {
11482 bp->hw_resc.max_irqs = max_irqs;
11483 }
11484
11485 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11486 {
11487 unsigned int cp;
11488
11489 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11490 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11491 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11492 else
11493 return cp - bp->cp_nr_rings;
11494 }
11495
11496 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11497 {
11498 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11499 }
11500
11501 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11502 {
11503 int max_irq = bnxt_get_max_func_irqs(bp);
11504 int total_req = bp->cp_nr_rings + num;
11505
11506 if (max_irq < total_req) {
11507 num = max_irq - bp->cp_nr_rings;
11508 if (num <= 0)
11509 return 0;
11510 }
11511 return num;
11512 }
11513
11514 static int bnxt_get_num_msix(struct bnxt *bp)
11515 {
11516 if (!BNXT_NEW_RM(bp))
11517 return bnxt_get_max_func_irqs(bp);
11518
11519 return bnxt_nq_rings_in_use(bp);
11520 }
11521
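/* First-time MSI-X setup.  At least 2 vectors are required unless RX and
 * TX share completion rings.  When the platform supports dynamic MSI-X
 * allocation, the IRQ table is sized for the device maximum up front so
 * bnxt_change_msix() can add vectors later without reallocating it.
 */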
11522 static int bnxt_init_int_mode(struct bnxt *bp)
11523 {
11524 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11525
11526 total_vecs = bnxt_get_num_msix(bp);
11527 max = bnxt_get_max_func_irqs(bp);
11528 if (total_vecs > max)
11529 total_vecs = max;
11530
11531 if (!total_vecs)
11532 return 0;
11533
11534 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11535 min = 2;
11536
11537 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11538 PCI_IRQ_MSIX);
11539 ulp_msix = bnxt_get_ulp_msix_num(bp);
11540 if (total_vecs < 0 || total_vecs < ulp_msix) {
11541 rc = -ENODEV;
11542 goto msix_setup_exit;
11543 }
11544
11545 tbl_size = total_vecs;
11546 if (pci_msix_can_alloc_dyn(bp->pdev))
11547 tbl_size = max;
11548 bp->irq_tbl = kzalloc_objs(*bp->irq_tbl, tbl_size);
11549 if (bp->irq_tbl) {
11550 for (i = 0; i < total_vecs; i++)
11551 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11552
11553 bp->total_irqs = total_vecs;
11554 /* Trim rings based upon num of vectors allocated */
11555 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11556 total_vecs - ulp_msix, min == 1);
11557 if (rc)
11558 goto msix_setup_exit;
11559
11560 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11561 bp->cp_nr_rings = (min == 1) ?
11562 max_t(int, tx_cp, bp->rx_nr_rings) :
11563 tx_cp + bp->rx_nr_rings;
11564
11565 } else {
11566 rc = -ENOMEM;
11567 goto msix_setup_exit;
11568 }
11569 return 0;
11570
11571 msix_setup_exit:
11572 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11573 kfree(bp->irq_tbl);
11574 bp->irq_tbl = NULL;
11575 pci_free_irq_vectors(bp->pdev);
11576 return rc;
11577 }
11578
11579 static void bnxt_clear_int_mode(struct bnxt *bp)
11580 {
11581 pci_free_irq_vectors(bp->pdev);
11582
11583 kfree(bp->irq_tbl);
11584 bp->irq_tbl = NULL;
11585 }
11586
11587 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11588 {
11589 struct bnxt_en_dev *edev = bp->edev[BNXT_AUXDEV_RDMA];
11590 bool irq_cleared = false;
11591 bool irq_change = false;
11592 int tcs = bp->num_tc;
11593 int irqs_required;
11594 int rc;
11595
11596 if (!bnxt_need_reserve_rings(bp))
11597 return 0;
11598
11599 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(edev)) {
11600 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11601
11602 if (ulp_msix > bp->ulp_num_msix_want)
11603 ulp_msix = bp->ulp_num_msix_want;
11604 irqs_required = ulp_msix + bp->cp_nr_rings;
11605 } else {
11606 irqs_required = bnxt_get_num_msix(bp);
11607 }
11608
11609 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11610 irq_change = true;
11611 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11612 bnxt_ulp_irq_stop(bp);
11613 bnxt_clear_int_mode(bp);
11614 irq_cleared = true;
11615 }
11616 }
11617 rc = __bnxt_reserve_rings(bp);
11618 if (irq_cleared) {
11619 if (!rc)
11620 rc = bnxt_init_int_mode(bp);
11621 bnxt_ulp_irq_restart(bp, rc);
11622 } else if (irq_change && !rc) {
11623 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11624 rc = -ENOSPC;
11625 }
11626 if (rc) {
11627 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11628 return rc;
11629 }
11630 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11631 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11632 netdev_err(bp->dev, "tx ring reservation failure\n");
11633 netdev_reset_tc(bp->dev);
11634 bp->num_tc = 0;
11635 if (bp->tx_nr_rings_xdp)
11636 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11637 else
11638 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11639 return -ENOMEM;
11640 }
11641 return 0;
11642 }
11643
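/* Quiesce the TX rings of one NAPI instance during an RX queue restart.
 * In TPH mode the TX and completion rings are also freed in firmware so
 * that bnxt_tx_queue_start() can re-allocate them with the same
 * parameters afterwards.
 */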
11644 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11645 {
11646 struct bnxt_tx_ring_info *txr;
11647 struct netdev_queue *txq;
11648 struct bnxt_napi *bnapi;
11649 int i;
11650
11651 bnapi = bp->bnapi[idx];
11652 bnxt_for_each_napi_tx(i, bnapi, txr) {
11653 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11654 synchronize_net();
11655
11656 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11657 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11658 if (txq) {
11659 __netif_tx_lock_bh(txq);
11660 netif_tx_stop_queue(txq);
11661 __netif_tx_unlock_bh(txq);
11662 }
11663 }
11664
11665 if (!bp->tph_mode)
11666 continue;
11667
11668 bnxt_hwrm_tx_ring_free(bp, txr, true);
11669 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11670 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11671 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11672 }
11673 }
11674
11675 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11676 {
11677 struct bnxt_tx_ring_info *txr;
11678 struct netdev_queue *txq;
11679 struct bnxt_napi *bnapi;
11680 int rc, i;
11681
11682 bnapi = bp->bnapi[idx];
11683 /* All rings have been reserved and previously allocated.
11684 * Reallocating with the same parameters should never fail.
11685 */
11686 bnxt_for_each_napi_tx(i, bnapi, txr) {
11687 if (!bp->tph_mode)
11688 goto start_tx;
11689
11690 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11691 if (rc)
11692 return rc;
11693
11694 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11695 if (rc)
11696 return rc;
11697
11698 txr->tx_prod = 0;
11699 txr->tx_cons = 0;
11700 txr->tx_hw_cons = 0;
11701 start_tx:
11702 WRITE_ONCE(txr->dev_state, 0);
11703 synchronize_net();
11704
11705 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11706 continue;
11707
11708 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11709 if (txq)
11710 netif_tx_start_queue(txq);
11711 }
11712
11713 return 0;
11714 }
11715
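/* IRQ affinity notifier used in TPH mode.  When the IRQ moves to a new
 * CPU, fetch the steering tag for that CPU, program it into the MSI-X
 * ST table entry, and restart the RX queue so its buffers are
 * re-allocated with the new steering tag applied.
 */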
11716 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11717 const cpumask_t *mask)
11718 {
11719 struct bnxt_irq *irq;
11720 u16 tag;
11721 int err;
11722
11723 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11724
11725 if (!irq->bp->tph_mode)
11726 return;
11727
11728 cpumask_copy(irq->cpu_mask, mask);
11729
11730 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11731 return;
11732
11733 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11734 cpumask_first(irq->cpu_mask), &tag))
11735 return;
11736
11737 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11738 return;
11739
11740 netdev_lock(irq->bp->dev);
11741 if (netif_running(irq->bp->dev)) {
11742 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11743 if (err)
11744 netdev_err(irq->bp->dev,
11745 "RX queue restart failed: err=%d\n", err);
11746 }
11747 netdev_unlock(irq->bp->dev);
11748 }
11749
11750 static void bnxt_irq_affinity_release(struct kref *ref)
11751 {
11752 struct irq_affinity_notify *notify =
11753 container_of(ref, struct irq_affinity_notify, kref);
11754 struct bnxt_irq *irq;
11755
11756 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11757
11758 if (!irq->bp->tph_mode)
11759 return;
11760
11761 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11762 netdev_err(irq->bp->dev,
11763 "Setting ST=0 for MSIX entry %d failed\n",
11764 irq->msix_nr);
11765 return;
11766 }
11767 }
11768
11769 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11770 {
11771 irq_set_affinity_notifier(irq->vector, NULL);
11772 }
11773
11774 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11775 {
11776 struct irq_affinity_notify *notify;
11777
11778 irq->bp = bp;
11779
11780 /* Nothing to do if TPH is not enabled */
11781 if (!bp->tph_mode)
11782 return;
11783
11784 /* Register IRQ affinity notifier */
11785 notify = &irq->affinity_notify;
11786 notify->irq = irq->vector;
11787 notify->notify = bnxt_irq_affinity_notify;
11788 notify->release = bnxt_irq_affinity_release;
11789
11790 irq_set_affinity_notifier(irq->vector, notify);
11791 }
11792
11793 static void bnxt_free_irq(struct bnxt *bp)
11794 {
11795 struct bnxt_irq *irq;
11796 int i;
11797
11798 #ifdef CONFIG_RFS_ACCEL
11799 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11800 bp->dev->rx_cpu_rmap = NULL;
11801 #endif
11802 if (!bp->irq_tbl || !bp->bnapi)
11803 return;
11804
11805 for (i = 0; i < bp->cp_nr_rings; i++) {
11806 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11807
11808 irq = &bp->irq_tbl[map_idx];
11809 if (irq->requested) {
11810 if (irq->have_cpumask) {
11811 irq_update_affinity_hint(irq->vector, NULL);
11812 free_cpumask_var(irq->cpu_mask);
11813 irq->have_cpumask = 0;
11814 }
11815
11816 bnxt_release_irq_notifier(irq);
11817
11818 free_irq(irq->vector, bp->bnapi[i]);
11819 }
11820
11821 irq->requested = 0;
11822 }
11823
11824 /* Disable TPH support */
11825 pcie_disable_tph(bp->pdev);
11826 bp->tph_mode = 0;
11827 }
11828
11829 static int bnxt_request_irq(struct bnxt *bp)
11830 {
11831 struct cpu_rmap *rmap = NULL;
11832 int i, j, rc = 0;
11833 unsigned long flags = 0;
11834
11835 rc = bnxt_setup_int_mode(bp);
11836 if (rc) {
11837 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11838 rc);
11839 return rc;
11840 }
11841 #ifdef CONFIG_RFS_ACCEL
11842 rmap = bp->dev->rx_cpu_rmap;
11843 #endif
11844
11845 /* Enable TPH support as part of IRQ request */
11846 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11847 if (!rc)
11848 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11849
11850 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11851 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11852 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11853
11854 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11855 rmap && bp->bnapi[i]->rx_ring) {
11856 rc = irq_cpu_rmap_add(rmap, irq->vector);
11857 if (rc)
11858 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11859 j);
11860 j++;
11861 }
11862
11863 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11864 bp->bnapi[i]);
11865 if (rc)
11866 break;
11867
11868 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11869 irq->requested = 1;
11870
11871 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11872 int numa_node = dev_to_node(&bp->pdev->dev);
11873 u16 tag;
11874
11875 irq->have_cpumask = 1;
11876 irq->msix_nr = map_idx;
11877 irq->ring_nr = i;
11878 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11879 irq->cpu_mask);
11880 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11881 if (rc) {
11882 netdev_warn(bp->dev,
11883 "Update affinity hint failed, IRQ = %d\n",
11884 irq->vector);
11885 break;
11886 }
11887
11888 bnxt_register_irq_notifier(bp, irq);
11889
11890 /* Init ST table entry */
11891 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11892 cpumask_first(irq->cpu_mask),
11893 &tag))
11894 continue;
11895
11896 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11897 }
11898 }
11899 return rc;
11900 }
11901
11902 static void bnxt_del_napi(struct bnxt *bp)
11903 {
11904 int i;
11905
11906 if (!bp->bnapi)
11907 return;
11908
11909 for (i = 0; i < bp->rx_nr_rings; i++)
11910 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11911 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11912 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11913
11914 for (i = 0; i < bp->cp_nr_rings; i++) {
11915 struct bnxt_napi *bnapi = bp->bnapi[i];
11916
11917 __netif_napi_del_locked(&bnapi->napi);
11918 }
11919 /* Since we called __netif_napi_del_locked(), we need to respect
11920 * an RCU grace period before freeing the napi structures.
11921 */
11922 synchronize_net();
11923 }
11924
11925 static void bnxt_init_napi(struct bnxt *bp)
11926 {
11927 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11928 unsigned int cp_nr_rings = bp->cp_nr_rings;
11929 struct bnxt_napi *bnapi;
11930 int i;
11931
11932 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11933 poll_fn = bnxt_poll_p5;
11934 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11935 cp_nr_rings--;
11936
11937 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11938
11939 for (i = 0; i < cp_nr_rings; i++) {
11940 bnapi = bp->bnapi[i];
11941 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11942 bnapi->index);
11943 }
11944 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11945 bnapi = bp->bnapi[cp_nr_rings];
11946 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11947 }
11948 }
11949
11950 static void bnxt_disable_napi(struct bnxt *bp)
11951 {
11952 int i;
11953
11954 if (!bp->bnapi ||
11955 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11956 return;
11957
11958 for (i = 0; i < bp->cp_nr_rings; i++) {
11959 struct bnxt_napi *bnapi = bp->bnapi[i];
11960 struct bnxt_cp_ring_info *cpr;
11961
11962 cpr = &bnapi->cp_ring;
11963 if (bnapi->tx_fault)
11964 cpr->sw_stats->tx.tx_resets++;
11965 if (bnapi->in_reset)
11966 cpr->sw_stats->rx.rx_resets++;
11967 napi_disable_locked(&bnapi->napi);
11968 }
11969 }
11970
11971 static void bnxt_enable_napi(struct bnxt *bp)
11972 {
11973 int i;
11974
11975 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11976 for (i = 0; i < bp->cp_nr_rings; i++) {
11977 struct bnxt_napi *bnapi = bp->bnapi[i];
11978 struct bnxt_cp_ring_info *cpr;
11979
11980 bnapi->tx_fault = 0;
11981
11982 cpr = &bnapi->cp_ring;
11983 bnapi->in_reset = false;
11984
11985 if (bnapi->rx_ring) {
11986 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11987 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11988 }
11989 napi_enable_locked(&bnapi->napi);
11990 }
11991 }
11992
11993 void bnxt_tx_disable(struct bnxt *bp)
11994 {
11995 int i;
11996 struct bnxt_tx_ring_info *txr;
11997
11998 if (bp->tx_ring) {
11999 for (i = 0; i < bp->tx_nr_rings; i++) {
12000 txr = &bp->tx_ring[i];
12001 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
12002 }
12003 }
12004 /* Make sure napi polls see @dev_state change */
12005 synchronize_net();
12006 /* Drop carrier first to prevent TX timeout */
12007 netif_carrier_off(bp->dev);
12008 /* Stop all TX queues */
12009 netif_tx_disable(bp->dev);
12010 }
12011
12012 void bnxt_tx_enable(struct bnxt *bp)
12013 {
12014 int i;
12015 struct bnxt_tx_ring_info *txr;
12016
12017 for (i = 0; i < bp->tx_nr_rings; i++) {
12018 txr = &bp->tx_ring[i];
12019 WRITE_ONCE(txr->dev_state, 0);
12020 }
12021 /* Make sure napi polls see @dev_state change */
12022 synchronize_net();
12023 netif_tx_wake_all_queues(bp->dev);
12024 if (BNXT_LINK_IS_UP(bp))
12025 netif_carrier_on(bp->dev);
12026 }
12027
12028 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
12029 {
12030 u8 active_fec = link_info->active_fec_sig_mode &
12031 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
12032
12033 switch (active_fec) {
12034 default:
12035 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
12036 return "None";
12037 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
12038 return "Clause 74 BaseR";
12039 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
12040 return "Clause 91 RS(528,514)";
12041 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
12042 return "Clause 91 RS544_1XN";
12043 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
12044 return "Clause 91 RS(544,514)";
12045 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
12046 return "Clause 91 RS272_1XN";
12047 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
12048 return "Clause 91 RS(272,257)";
12049 }
12050 }
12051
12052 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
12053 {
12054 u8 reason = link_info->link_down_reason;
12055
12056 /* Multiple bits can be set; report only one bit, in order of
12057 * priority.
12058 */
12059 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
12060 return "(Remote fault)";
12061 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
12062 return "(OTP Speed limit violation)";
12063 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
12064 return "(Cable removed)";
12065 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
12066 return "(Module fault)";
12067 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
12068 return "(BMC request down)";
12069 return "";
12070 }
12071
12072 void bnxt_report_link(struct bnxt *bp)
12073 {
12074 if (BNXT_LINK_IS_UP(bp)) {
12075 const char *signal = "";
12076 const char *flow_ctrl;
12077 const char *duplex;
12078 u32 speed;
12079 u16 fec;
12080
12081 netif_carrier_on(bp->dev);
12082 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
12083 if (speed == SPEED_UNKNOWN) {
12084 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
12085 return;
12086 }
12087 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
12088 duplex = "full";
12089 else
12090 duplex = "half";
12091 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
12092 flow_ctrl = "ON - receive & transmit";
12093 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
12094 flow_ctrl = "ON - transmit";
12095 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
12096 flow_ctrl = "ON - receive";
12097 else
12098 flow_ctrl = "none";
12099 if (bp->link_info.phy_qcfg_resp.option_flags &
12100 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
12101 u8 sig_mode = bp->link_info.active_fec_sig_mode &
12102 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12103 switch (sig_mode) {
12104 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12105 signal = "(NRZ) ";
12106 break;
12107 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12108 signal = "(PAM4 56Gbps) ";
12109 break;
12110 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12111 signal = "(PAM4 112Gbps) ";
12112 break;
12113 default:
12114 break;
12115 }
12116 }
12117 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12118 speed, signal, duplex, flow_ctrl);
12119 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12120 netdev_info(bp->dev, "EEE is %s\n",
12121 bp->eee.eee_active ? "active" :
12122 "not active");
12123 fec = bp->link_info.fec_cfg;
12124 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12125 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12126 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12127 bnxt_report_fec(&bp->link_info));
12128 } else {
12129 char *str = bnxt_link_down_reason(&bp->link_info);
12130
12131 netif_carrier_off(bp->dev);
12132 netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12133 }
12134 }
12135
12136 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12137 {
12138 if (!resp->supported_speeds_auto_mode &&
12139 !resp->supported_speeds_force_mode &&
12140 !resp->supported_pam4_speeds_auto_mode &&
12141 !resp->supported_pam4_speeds_force_mode &&
12142 !resp->supported_speeds2_auto_mode &&
12143 !resp->supported_speeds2_force_mode)
12144 return true;
12145 return false;
12146 }
12147
12148 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12149 {
12150 struct bnxt_link_info *link_info = &bp->link_info;
12151 struct hwrm_port_phy_qcaps_output *resp;
12152 struct hwrm_port_phy_qcaps_input *req;
12153 int rc = 0;
12154
12155 if (bp->hwrm_spec_code < 0x10201)
12156 return 0;
12157
12158 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12159 if (rc)
12160 return rc;
12161
12162 resp = hwrm_req_hold(bp, req);
12163 rc = hwrm_req_send(bp, req);
12164 if (rc)
12165 goto hwrm_phy_qcaps_exit;
12166
12167 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12168 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12169 struct ethtool_keee *eee = &bp->eee;
12170 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12171
12172 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12173 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12174 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12175 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12176 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12177 }
12178
12179 if (bp->hwrm_spec_code >= 0x10a01) {
12180 if (bnxt_phy_qcaps_no_speed(resp)) {
12181 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12182 netdev_warn(bp->dev, "Ethernet link disabled\n");
12183 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12184 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12185 netdev_info(bp->dev, "Ethernet link enabled\n");
12186 /* Phy re-enabled, reprobe the speeds */
12187 link_info->support_auto_speeds = 0;
12188 link_info->support_pam4_auto_speeds = 0;
12189 link_info->support_auto_speeds2 = 0;
12190 }
12191 }
12192 if (resp->supported_speeds_auto_mode)
12193 link_info->support_auto_speeds =
12194 le16_to_cpu(resp->supported_speeds_auto_mode);
12195 if (resp->supported_pam4_speeds_auto_mode)
12196 link_info->support_pam4_auto_speeds =
12197 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12198 if (resp->supported_speeds2_auto_mode)
12199 link_info->support_auto_speeds2 =
12200 le16_to_cpu(resp->supported_speeds2_auto_mode);
12201
12202 bp->port_count = resp->port_cnt;
12203
12204 hwrm_phy_qcaps_exit:
12205 hwrm_req_drop(bp, req);
12206 return rc;
12207 }
12208
12209 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12210 {
12211 struct hwrm_port_mac_qcaps_output *resp;
12212 struct hwrm_port_mac_qcaps_input *req;
12213 int rc;
12214
12215 if (bp->hwrm_spec_code < 0x10a03)
12216 return;
12217
12218 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12219 if (rc)
12220 return;
12221
12222 resp = hwrm_req_hold(bp, req);
12223 rc = hwrm_req_send_silent(bp, req);
12224 if (!rc)
12225 bp->mac_flags = resp->flags;
12226 hwrm_req_drop(bp, req);
12227 }
12228
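/* Return true if @advertising contains any bit not present in @supported.
 * Illustrative example: advertising = 0x3, supported = 0x1 -> diff = 0x2,
 * (supported | diff) != supported -> true.
 */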
12229 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12230 {
12231 u16 diff = advertising ^ supported;
12232
12233 return ((supported | diff) != supported);
12234 }
12235
12236 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12237 {
12238 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12239
12240 /* Check if any advertised speeds are no longer supported. The caller
12241 * holds the link_lock mutex, so we can modify link_info settings.
12242 */
12243 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12244 if (bnxt_support_dropped(link_info->advertising,
12245 link_info->support_auto_speeds2)) {
12246 link_info->advertising = link_info->support_auto_speeds2;
12247 return true;
12248 }
12249 return false;
12250 }
12251 if (bnxt_support_dropped(link_info->advertising,
12252 link_info->support_auto_speeds)) {
12253 link_info->advertising = link_info->support_auto_speeds;
12254 return true;
12255 }
12256 if (bnxt_support_dropped(link_info->advertising_pam4,
12257 link_info->support_pam4_auto_speeds)) {
12258 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12259 return true;
12260 }
12261 return false;
12262 }
12263
12264 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12265 {
12266 struct bnxt_link_info *link_info = &bp->link_info;
12267 struct hwrm_port_phy_qcfg_output *resp;
12268 struct hwrm_port_phy_qcfg_input *req;
12269 u8 link_state = link_info->link_state;
12270 bool support_changed;
12271 int rc;
12272
12273 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12274 if (rc)
12275 return rc;
12276
12277 resp = hwrm_req_hold(bp, req);
12278 rc = hwrm_req_send(bp, req);
12279 if (rc) {
12280 hwrm_req_drop(bp, req);
12281 if (BNXT_VF(bp) && rc == -ENODEV) {
12282 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12283 rc = 0;
12284 }
12285 return rc;
12286 }
12287
12288 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12289 link_info->phy_link_status = resp->link;
12290 link_info->duplex = resp->duplex_cfg;
12291 if (bp->hwrm_spec_code >= 0x10800)
12292 link_info->duplex = resp->duplex_state;
12293 link_info->pause = resp->pause;
12294 link_info->auto_mode = resp->auto_mode;
12295 link_info->auto_pause_setting = resp->auto_pause;
12296 link_info->lp_pause = resp->link_partner_adv_pause;
12297 link_info->force_pause_setting = resp->force_pause;
12298 link_info->duplex_setting = resp->duplex_cfg;
12299 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12300 link_info->link_speed = le16_to_cpu(resp->link_speed);
12301 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12302 link_info->active_lanes = resp->active_lanes;
12303 } else {
12304 link_info->link_speed = 0;
12305 link_info->active_lanes = 0;
12306 }
12307 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12308 link_info->force_pam4_link_speed =
12309 le16_to_cpu(resp->force_pam4_link_speed);
12310 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12311 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12312 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12313 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12314 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12315 link_info->auto_pam4_link_speeds =
12316 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12317 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12318 link_info->lp_auto_link_speeds =
12319 le16_to_cpu(resp->link_partner_adv_speeds);
12320 link_info->lp_auto_pam4_link_speeds =
12321 resp->link_partner_pam4_adv_speeds;
12322 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12323 link_info->phy_ver[0] = resp->phy_maj;
12324 link_info->phy_ver[1] = resp->phy_min;
12325 link_info->phy_ver[2] = resp->phy_bld;
12326 link_info->media_type = resp->media_type;
12327 link_info->phy_type = resp->phy_type;
12328 link_info->transceiver = resp->xcvr_pkg_type;
12329 link_info->phy_addr = resp->eee_config_phy_addr &
12330 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12331 link_info->module_status = resp->module_status;
12332 link_info->link_down_reason = resp->link_down_reason;
12333
12334 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12335 struct ethtool_keee *eee = &bp->eee;
12336 u16 fw_speeds;
12337
12338 eee->eee_active = 0;
12339 if (resp->eee_config_phy_addr &
12340 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12341 eee->eee_active = 1;
12342 fw_speeds = le16_to_cpu(
12343 resp->link_partner_adv_eee_link_speed_mask);
12344 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12345 }
12346
12347 /* Pull initial EEE config */
12348 if (!chng_link_state) {
12349 if (resp->eee_config_phy_addr &
12350 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12351 eee->eee_enabled = 1;
12352
12353 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12354 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12355
12356 if (resp->eee_config_phy_addr &
12357 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12358 __le32 tmr;
12359
12360 eee->tx_lpi_enabled = 1;
12361 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12362 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12363 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12364 }
12365 }
12366 }
12367
12368 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12369 if (bp->hwrm_spec_code >= 0x10504) {
12370 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12371 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12372 }
12373 /* TODO: need to add more logic to report VF link */
12374 if (chng_link_state) {
12375 if (link_info->phy_link_status == BNXT_LINK_LINK)
12376 link_info->link_state = BNXT_LINK_STATE_UP;
12377 else
12378 link_info->link_state = BNXT_LINK_STATE_DOWN;
12379 if (link_state != link_info->link_state)
12380 bnxt_report_link(bp);
12381 } else {
12382 /* always set link down if not required to update link state */
12383 link_info->link_state = BNXT_LINK_STATE_DOWN;
12384 }
12385 hwrm_req_drop(bp, req);
12386
12387 if (!BNXT_PHY_CFG_ABLE(bp))
12388 return 0;
12389
12390 support_changed = bnxt_support_speed_dropped(link_info);
12391 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12392 bnxt_hwrm_set_link_setting(bp, true, false);
12393 return 0;
12394 }
12395
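/* Refresh the PHY state and warn if the SFP+ module on the port is
 * unqualified, powered down, or has TX disabled. Callers hold link_lock
 * (see bnxt_update_link()).
 */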
12396 static void bnxt_get_port_module_status(struct bnxt *bp)
12397 {
12398 struct bnxt_link_info *link_info = &bp->link_info;
12399 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12400 u8 module_status;
12401
12402 if (bnxt_update_link(bp, true))
12403 return;
12404
12405 module_status = link_info->module_status;
12406 switch (module_status) {
12407 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12408 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12409 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12410 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12411 bp->pf.port_id);
12412 if (bp->hwrm_spec_code >= 0x10201) {
12413 netdev_warn(bp->dev, "Module part number %s\n",
12414 resp->phy_vendor_partnumber);
12415 }
12416 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12417 netdev_warn(bp->dev, "TX is disabled\n");
12418 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12419 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12420 }
12421 }
12422
12423 static void
12424 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12425 {
12426 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12427 if (bp->hwrm_spec_code >= 0x10201)
12428 req->auto_pause =
12429 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12430 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12431 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12432 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12433 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12434 req->enables |=
12435 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12436 } else {
12437 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12438 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12439 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12440 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12441 req->enables |=
12442 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12443 if (bp->hwrm_spec_code >= 0x10201) {
12444 req->auto_pause = req->force_pause;
12445 req->enables |= cpu_to_le32(
12446 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12447 }
12448 }
12449 }
12450
12451 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12452 {
12453 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12454 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12455 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12456 req->enables |=
12457 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12458 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12459 } else if (bp->link_info.advertising) {
12460 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12461 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12462 }
12463 if (bp->link_info.advertising_pam4) {
12464 req->enables |=
12465 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12466 req->auto_link_pam4_speed_mask =
12467 cpu_to_le16(bp->link_info.advertising_pam4);
12468 }
12469 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12470 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12471 } else {
12472 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12473 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12474 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12475 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12476 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12477 (u32)bp->link_info.req_link_speed);
12478 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12479 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12480 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12481 } else {
12482 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12483 }
12484 }
12485
12486 /* tell chimp that the setting takes effect immediately */
12487 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12488 }
12489
12490 int bnxt_hwrm_set_pause(struct bnxt *bp)
12491 {
12492 struct hwrm_port_phy_cfg_input *req;
12493 int rc;
12494
12495 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12496 if (rc)
12497 return rc;
12498
12499 bnxt_hwrm_set_pause_common(bp, req);
12500
12501 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12502 bp->link_info.force_link_chng)
12503 bnxt_hwrm_set_link_common(bp, req);
12504
12505 rc = hwrm_req_send(bp, req);
12506 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12507 /* since changing the pause setting doesn't trigger any link
12508 * change event, the driver needs to update the current pause
12509 * result upon successful return of the phy_cfg command
12510 */
12511 bp->link_info.pause =
12512 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12513 bp->link_info.auto_pause_setting = 0;
12514 if (!bp->link_info.force_link_chng)
12515 bnxt_report_link(bp);
12516 }
12517 bp->link_info.force_link_chng = false;
12518 return rc;
12519 }
12520
12521 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12522 struct hwrm_port_phy_cfg_input *req)
12523 {
12524 struct ethtool_keee *eee = &bp->eee;
12525
12526 if (eee->eee_enabled) {
12527 u16 eee_speeds;
12528 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12529
12530 if (eee->tx_lpi_enabled)
12531 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12532 else
12533 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12534
12535 req->flags |= cpu_to_le32(flags);
12536 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12537 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12538 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12539 } else {
12540 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12541 }
12542 }
12543
12544 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12545 {
12546 struct hwrm_port_phy_cfg_input *req;
12547 int rc;
12548
12549 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12550 if (rc)
12551 return rc;
12552
12553 if (set_pause)
12554 bnxt_hwrm_set_pause_common(bp, req);
12555
12556 bnxt_hwrm_set_link_common(bp, req);
12557
12558 if (set_eee)
12559 bnxt_hwrm_set_eee(bp, req);
12560 return hwrm_req_send(bp, req);
12561 }
12562
12563 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12564 {
12565 struct hwrm_port_phy_cfg_input *req;
12566 int rc;
12567
12568 if (!BNXT_SINGLE_PF(bp))
12569 return 0;
12570
12571 if (pci_num_vf(bp->pdev) &&
12572 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12573 return 0;
12574
12575 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12576 if (rc)
12577 return rc;
12578
12579 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12580 rc = hwrm_req_send(bp, req);
12581 if (!rc) {
12582 mutex_lock(&bp->link_lock);
12583 /* The device is not obliged to bring the link down in certain
12584 * scenarios, even when forced. Setting the state to unknown is
12585 * consistent with driver startup and will force the link state
12586 * to be reported during subsequent open based on PORT_PHY_QCFG.
12587 */
12588 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12589 mutex_unlock(&bp->link_lock);
12590 }
12591 return rc;
12592 }
12593
12594 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12595 {
12596 #ifdef CONFIG_TEE_BNXT_FW
12597 int rc = tee_bnxt_fw_load();
12598
12599 if (rc)
12600 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12601
12602 return rc;
12603 #else
12604 netdev_err(bp->dev, "OP-TEE not supported\n");
12605 return -ENODEV;
12606 #endif
12607 }
12608
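/* Poll the firmware health register until firmware is no longer booting or
 * recovering, then report an unhealthy status or, if firmware crashed with
 * no master function, attempt recovery via OP-TEE.
 */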
12609 static int bnxt_try_recover_fw(struct bnxt *bp)
12610 {
12611 if (bp->fw_health && bp->fw_health->status_reliable) {
12612 int retry = 0, rc;
12613 u32 sts;
12614
12615 do {
12616 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12617 rc = bnxt_hwrm_poll(bp);
12618 if (!BNXT_FW_IS_BOOTING(sts) &&
12619 !BNXT_FW_IS_RECOVERING(sts))
12620 break;
12621 retry++;
12622 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12623
12624 if (!BNXT_FW_IS_HEALTHY(sts)) {
12625 netdev_err(bp->dev,
12626 "Firmware not responding, status: 0x%x\n",
12627 sts);
12628 rc = -ENODEV;
12629 }
12630 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12631 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12632 return bnxt_fw_reset_via_optee(bp);
12633 }
12634 return rc;
12635 }
12636
12637 return -ENODEV;
12638 }
12639
12640 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12641 {
12642 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12643
12644 if (!BNXT_NEW_RM(bp))
12645 return; /* no resource reservations required */
12646
12647 hw_resc->resv_cp_rings = 0;
12648 hw_resc->resv_stat_ctxs = 0;
12649 hw_resc->resv_irqs = 0;
12650 hw_resc->resv_tx_rings = 0;
12651 hw_resc->resv_rx_rings = 0;
12652 hw_resc->resv_hw_ring_grps = 0;
12653 hw_resc->resv_vnics = 0;
12654 hw_resc->resv_rsscos_ctxs = 0;
12655 if (!fw_reset) {
12656 bp->tx_nr_rings = 0;
12657 bp->rx_nr_rings = 0;
12658 }
12659 }
12660
12661 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12662 {
12663 int rc;
12664
12665 if (!BNXT_NEW_RM(bp))
12666 return 0; /* no resource reservations required */
12667
12668 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12669 if (rc)
12670 netdev_err(bp->dev, "resc_qcaps failed\n");
12671
12672 bnxt_clear_reservations(bp, fw_reset);
12673
12674 return rc;
12675 }
12676
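/* Tell firmware that the interface is going up or down. On the "up"
 * transition, the response flags indicate whether resources changed, a hot
 * firmware reset completed, or capabilities changed; in those cases context
 * memory and ring reservations are re-initialized before the open proceeds.
 */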
12677 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12678 {
12679 struct hwrm_func_drv_if_change_output *resp;
12680 struct hwrm_func_drv_if_change_input *req;
12681 bool resc_reinit = false;
12682 bool caps_change = false;
12683 int rc, retry = 0;
12684 bool fw_reset;
12685 u32 flags = 0;
12686
12687 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12688 bp->fw_reset_state = 0;
12689
12690 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12691 return 0;
12692
12693 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12694 if (rc)
12695 return rc;
12696
12697 if (up)
12698 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12699 resp = hwrm_req_hold(bp, req);
12700
12701 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12702 while (retry < BNXT_FW_IF_RETRY) {
12703 rc = hwrm_req_send(bp, req);
12704 if (rc != -EAGAIN)
12705 break;
12706
12707 msleep(50);
12708 retry++;
12709 }
12710
12711 if (rc == -EAGAIN) {
12712 hwrm_req_drop(bp, req);
12713 return rc;
12714 } else if (!rc) {
12715 flags = le32_to_cpu(resp->flags);
12716 } else if (up) {
12717 rc = bnxt_try_recover_fw(bp);
12718 fw_reset = true;
12719 }
12720 hwrm_req_drop(bp, req);
12721 if (rc)
12722 return rc;
12723
12724 if (!up) {
12725 bnxt_inv_fw_health_reg(bp);
12726 return 0;
12727 }
12728
12729 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12730 resc_reinit = true;
12731 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12732 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12733 fw_reset = true;
12734 else
12735 bnxt_remap_fw_health_regs(bp);
12736
12737 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12738 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12739 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12740 return -ENODEV;
12741 }
12742 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12743 caps_change = true;
12744
12745 if (resc_reinit || fw_reset || caps_change) {
12746 if (fw_reset || caps_change) {
12747 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12748 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12749 bnxt_ulp_irq_stop(bp);
12750 bnxt_free_ctx_mem(bp, false);
12751 bnxt_dcb_free(bp);
12752 rc = bnxt_fw_init_one(bp);
12753 if (rc) {
12754 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12755 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12756 return rc;
12757 }
12758 /* IRQ will be initialized later in bnxt_request_irq() */
12759 bnxt_clear_int_mode(bp);
12760 }
12761 rc = bnxt_cancel_reservations(bp, fw_reset);
12762 }
12763 return rc;
12764 }
12765
12766 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12767 {
12768 struct hwrm_port_led_qcaps_output *resp;
12769 struct hwrm_port_led_qcaps_input *req;
12770 struct bnxt_pf_info *pf = &bp->pf;
12771 int rc;
12772
12773 bp->num_leds = 0;
12774 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12775 return 0;
12776
12777 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12778 if (rc)
12779 return rc;
12780
12781 req->port_id = cpu_to_le16(pf->port_id);
12782 resp = hwrm_req_hold(bp, req);
12783 rc = hwrm_req_send(bp, req);
12784 if (rc) {
12785 hwrm_req_drop(bp, req);
12786 return rc;
12787 }
12788 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12789 int i;
12790
12791 bp->num_leds = resp->num_leds;
12792 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12793 bp->num_leds);
12794 for (i = 0; i < bp->num_leds; i++) {
12795 struct bnxt_led_info *led = &bp->leds[i];
12796 __le16 caps = led->led_state_caps;
12797
12798 if (!led->led_group_id ||
12799 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12800 bp->num_leds = 0;
12801 break;
12802 }
12803 }
12804 }
12805 hwrm_req_drop(bp, req);
12806 return 0;
12807 }
12808
12809 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12810 {
12811 struct hwrm_wol_filter_alloc_output *resp;
12812 struct hwrm_wol_filter_alloc_input *req;
12813 int rc;
12814
12815 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12816 if (rc)
12817 return rc;
12818
12819 req->port_id = cpu_to_le16(bp->pf.port_id);
12820 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12821 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12822 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12823
12824 resp = hwrm_req_hold(bp, req);
12825 rc = hwrm_req_send(bp, req);
12826 if (!rc)
12827 bp->wol_filter_id = resp->wol_filter_id;
12828 hwrm_req_drop(bp, req);
12829 return rc;
12830 }
12831
12832 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12833 {
12834 struct hwrm_wol_filter_free_input *req;
12835 int rc;
12836
12837 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12838 if (rc)
12839 return rc;
12840
12841 req->port_id = cpu_to_le16(bp->pf.port_id);
12842 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12843 req->wol_filter_id = bp->wol_filter_id;
12844
12845 return hwrm_req_send(bp, req);
12846 }
12847
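/* Query one WoL filter from firmware, remember its id if it is a magic
 * packet filter, and return the next handle so the caller can walk the
 * whole list.
 */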
12848 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12849 {
12850 struct hwrm_wol_filter_qcfg_output *resp;
12851 struct hwrm_wol_filter_qcfg_input *req;
12852 u16 next_handle = 0;
12853 int rc;
12854
12855 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12856 if (rc)
12857 return rc;
12858
12859 req->port_id = cpu_to_le16(bp->pf.port_id);
12860 req->handle = cpu_to_le16(handle);
12861 resp = hwrm_req_hold(bp, req);
12862 rc = hwrm_req_send(bp, req);
12863 if (!rc) {
12864 next_handle = le16_to_cpu(resp->next_handle);
12865 if (next_handle != 0) {
12866 if (resp->wol_type ==
12867 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12868 bp->wol = 1;
12869 bp->wol_filter_id = resp->wol_filter_id;
12870 }
12871 }
12872 }
12873 hwrm_req_drop(bp, req);
12874 return next_handle;
12875 }
12876
12877 static void bnxt_get_wol_settings(struct bnxt *bp)
12878 {
12879 u16 handle = 0;
12880
12881 bp->wol = 0;
12882 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12883 return;
12884
12885 do {
12886 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12887 } while (handle && handle != 0xffff);
12888 }
12889
12890 static bool bnxt_eee_config_ok(struct bnxt *bp)
12891 {
12892 struct ethtool_keee *eee = &bp->eee;
12893 struct bnxt_link_info *link_info = &bp->link_info;
12894
12895 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12896 return true;
12897
12898 if (eee->eee_enabled) {
12899 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12900 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12901
12902 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12903
12904 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12905 eee->eee_enabled = 0;
12906 return false;
12907 }
12908 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12909 linkmode_and(eee->advertised, advertising,
12910 eee->supported);
12911 return false;
12912 }
12913 }
12914 return true;
12915 }
12916
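/* Re-read the link state from firmware and, on a single-function PF,
 * reapply speed, pause, and EEE settings if the requested configuration no
 * longer matches what firmware reports.
 */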
12917 static int bnxt_update_phy_setting(struct bnxt *bp)
12918 {
12919 int rc;
12920 bool update_link = false;
12921 bool update_pause = false;
12922 bool update_eee = false;
12923 struct bnxt_link_info *link_info = &bp->link_info;
12924
12925 rc = bnxt_update_link(bp, true);
12926 if (rc) {
12927 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12928 rc);
12929 return rc;
12930 }
12931 if (!BNXT_SINGLE_PF(bp))
12932 return 0;
12933
12934 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12935 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12936 link_info->req_flow_ctrl)
12937 update_pause = true;
12938 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12939 link_info->force_pause_setting != link_info->req_flow_ctrl)
12940 update_pause = true;
12941 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12942 if (BNXT_AUTO_MODE(link_info->auto_mode))
12943 update_link = true;
12944 if (bnxt_force_speed_updated(link_info))
12945 update_link = true;
12946 if (link_info->req_duplex != link_info->duplex_setting)
12947 update_link = true;
12948 } else {
12949 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12950 update_link = true;
12951 if (bnxt_auto_speed_updated(link_info))
12952 update_link = true;
12953 }
12954
12955 /* The last close may have shut down the link, so we need to call
12956 * PHY_CFG to bring it back up.
12957 */
12958 if (!BNXT_LINK_IS_UP(bp))
12959 update_link = true;
12960
12961 if (!bnxt_eee_config_ok(bp))
12962 update_eee = true;
12963
12964 if (update_link)
12965 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12966 else if (update_pause)
12967 rc = bnxt_hwrm_set_pause(bp);
12968 if (rc) {
12969 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12970 rc);
12971 return rc;
12972 }
12973
12974 return rc;
12975 }
12976
12977 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12978
12979 static int bnxt_reinit_after_abort(struct bnxt *bp)
12980 {
12981 int rc;
12982
12983 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12984 return -EBUSY;
12985
12986 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12987 return -ENODEV;
12988
12989 rc = bnxt_fw_init_one(bp);
12990 if (!rc) {
12991 bnxt_clear_int_mode(bp);
12992 rc = bnxt_init_int_mode(bp);
12993 if (!rc) {
12994 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12995 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12996 }
12997 }
12998 return rc;
12999 }
13000
13001 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
13002 {
13003 struct bnxt_ntuple_filter *ntp_fltr;
13004 struct bnxt_l2_filter *l2_fltr;
13005
13006 if (list_empty(&fltr->list))
13007 return;
13008
13009 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
13010 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
13011 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
13012 atomic_inc(&l2_fltr->refcnt);
13013 ntp_fltr->l2_fltr = l2_fltr;
13014 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
13015 bnxt_del_ntp_filter(bp, ntp_fltr);
13016 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
13017 fltr->sw_id);
13018 }
13019 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
13020 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
13021 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
13022 bnxt_del_l2_filter(bp, l2_fltr);
13023 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
13024 fltr->sw_id);
13025 }
13026 }
13027 }
13028
13029 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
13030 {
13031 struct bnxt_filter_base *usr_fltr, *tmp;
13032
13033 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
13034 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
13035 }
13036
13037 static int bnxt_set_xps_mapping(struct bnxt *bp)
13038 {
13039 int numa_node = dev_to_node(&bp->pdev->dev);
13040 unsigned int q_idx, map_idx, cpu, i;
13041 const struct cpumask *cpu_mask_ptr;
13042 int nr_cpus = num_online_cpus();
13043 cpumask_t *q_map;
13044 int rc = 0;
13045
13046 q_map = kzalloc_objs(*q_map, bp->tx_nr_rings_per_tc);
13047 if (!q_map)
13048 return -ENOMEM;
13049
13050 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
13051 * Each TC has the same number of TX queues. The nth TX queue for each
13052 * TC will have the same CPU mask.
13053 */
13054 for (i = 0; i < nr_cpus; i++) {
13055 map_idx = i % bp->tx_nr_rings_per_tc;
13056 cpu = cpumask_local_spread(i, numa_node);
13057 cpu_mask_ptr = get_cpu_mask(cpu);
13058 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
13059 }
13060
13061 /* Register CPU mask for each TX queue except the ones marked for XDP */
13062 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
13063 map_idx = q_idx % bp->tx_nr_rings_per_tc;
13064 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
13065 if (rc) {
13066 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
13067 q_idx);
13068 break;
13069 }
13070 }
13071
13072 kfree(q_map);
13073
13074 return rc;
13075 }
13076
13077 static int bnxt_tx_nr_rings(struct bnxt *bp)
13078 {
13079 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
13080 bp->tx_nr_rings_per_tc;
13081 }
13082
13083 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
13084 {
13085 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
13086 }
13087
13088 static void bnxt_set_xdp_tx_rings(struct bnxt *bp)
13089 {
13090 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13091 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13092 }
13093
13094 static void bnxt_adj_tx_rings(struct bnxt *bp)
13095 {
13096 /* Make adjustments if reserved TX rings are less than requested */
13097 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13098 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13099 if (bp->tx_nr_rings_xdp)
13100 bnxt_set_xdp_tx_rings(bp);
13101 }
13102
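/* Common open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC, update PHY settings under link_lock, and finally
 * enable interrupts and TX queues.
 */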
13103 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13104 {
13105 int rc = 0;
13106
13107 netif_carrier_off(bp->dev);
13108 if (irq_re_init) {
13109 /* Reserve rings now if none were reserved at driver probe. */
13110 rc = bnxt_init_dflt_ring_mode(bp);
13111 if (rc) {
13112 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
13113 return rc;
13114 }
13115 }
13116 rc = bnxt_reserve_rings(bp, irq_re_init);
13117 if (rc)
13118 return rc;
13119
13120 bnxt_adj_tx_rings(bp);
13121 rc = bnxt_alloc_mem(bp, irq_re_init);
13122 if (rc) {
13123 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13124 goto open_err_free_mem;
13125 }
13126
13127 if (irq_re_init) {
13128 bnxt_init_napi(bp);
13129 rc = bnxt_request_irq(bp);
13130 if (rc) {
13131 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13132 goto open_err_irq;
13133 }
13134 }
13135
13136 rc = bnxt_init_nic(bp, irq_re_init);
13137 if (rc) {
13138 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13139 goto open_err_irq;
13140 }
13141
13142 bnxt_enable_napi(bp);
13143 bnxt_debug_dev_init(bp);
13144
13145 if (link_re_init) {
13146 mutex_lock(&bp->link_lock);
13147 rc = bnxt_update_phy_setting(bp);
13148 mutex_unlock(&bp->link_lock);
13149 if (rc) {
13150 netdev_warn(bp->dev, "failed to update phy settings\n");
13151 if (BNXT_SINGLE_PF(bp)) {
13152 bp->link_info.phy_retry = true;
13153 bp->link_info.phy_retry_expires =
13154 jiffies + 5 * HZ;
13155 }
13156 }
13157 }
13158
13159 if (irq_re_init) {
13160 udp_tunnel_nic_reset_ntf(bp->dev);
13161 rc = bnxt_set_xps_mapping(bp);
13162 if (rc)
13163 netdev_warn(bp->dev, "failed to set xps mapping\n");
13164 }
13165
13166 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13167 if (!static_key_enabled(&bnxt_xdp_locking_key))
13168 static_branch_enable(&bnxt_xdp_locking_key);
13169 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13170 static_branch_disable(&bnxt_xdp_locking_key);
13171 }
13172 set_bit(BNXT_STATE_OPEN, &bp->state);
13173 bnxt_enable_int(bp);
13174 /* Enable TX queues */
13175 bnxt_tx_enable(bp);
13176 mod_timer(&bp->timer, jiffies + bp->current_interval);
13177 /* Poll link status and check for SFP+ module status */
13178 mutex_lock(&bp->link_lock);
13179 bnxt_get_port_module_status(bp);
13180 mutex_unlock(&bp->link_lock);
13181
13182 /* VF-reps may need to be re-opened after the PF is re-opened */
13183 if (BNXT_PF(bp))
13184 bnxt_vf_reps_open(bp);
13185 bnxt_ptp_init_rtc(bp, true);
13186 bnxt_ptp_cfg_tstamp_filters(bp);
13187 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13188 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13189 bnxt_cfg_usr_fltrs(bp);
13190 return 0;
13191
13192 open_err_irq:
13193 bnxt_del_napi(bp);
13194
13195 open_err_free_mem:
13196 bnxt_free_skbs(bp);
13197 bnxt_free_irq(bp);
13198 bnxt_free_mem(bp, true);
13199 return rc;
13200 }
13201
13202 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13203 {
13204 int rc = 0;
13205
13206 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13207 rc = -EIO;
13208 if (!rc)
13209 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13210 if (rc) {
13211 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13212 netif_close(bp->dev);
13213 }
13214 return rc;
13215 }
13216
13217 /* netdev instance lock held, open the NIC halfway by allocating all
13218 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13219 * for offline self tests.
13220 */
13221 int bnxt_half_open_nic(struct bnxt *bp)
13222 {
13223 int rc = 0;
13224
13225 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13226 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13227 rc = -ENODEV;
13228 goto half_open_err;
13229 }
13230
13231 rc = bnxt_alloc_mem(bp, true);
13232 if (rc) {
13233 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13234 goto half_open_err;
13235 }
13236 bnxt_init_napi(bp);
13237 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13238 rc = bnxt_init_nic(bp, true);
13239 if (rc) {
13240 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13241 bnxt_del_napi(bp);
13242 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13243 goto half_open_err;
13244 }
13245 return 0;
13246
13247 half_open_err:
13248 bnxt_free_skbs(bp);
13249 bnxt_free_mem(bp, true);
13250 netif_close(bp->dev);
13251 return rc;
13252 }
13253
13254 /* netdev instance lock held, this call can only be made after a previous
13255 * successful call to bnxt_half_open_nic().
13256 */
13257 void bnxt_half_close_nic(struct bnxt *bp)
13258 {
13259 bnxt_hwrm_resource_free(bp, false, true);
13260 bnxt_del_napi(bp);
13261 bnxt_free_skbs(bp);
13262 bnxt_free_mem(bp, true);
13263 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13264 }
13265
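/* Re-apply hardware SR-IOV configuration if this PF still has active VFs. */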
13266 void bnxt_reenable_sriov(struct bnxt *bp)
13267 {
13268 if (BNXT_PF(bp)) {
13269 struct bnxt_pf_info *pf = &bp->pf;
13270 int n = pf->active_vfs;
13271
13272 if (n)
13273 bnxt_cfg_hw_sriov(bp, &n, true);
13274 }
13275 }
13276
13277 static int bnxt_open(struct net_device *dev)
13278 {
13279 struct bnxt *bp = netdev_priv(dev);
13280 int rc;
13281
13282 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13283 rc = bnxt_reinit_after_abort(bp);
13284 if (rc) {
13285 if (rc == -EBUSY)
13286 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13287 else
13288 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13289 return -ENODEV;
13290 }
13291 }
13292
13293 rc = bnxt_hwrm_if_change(bp, true);
13294 if (rc)
13295 return rc;
13296
13297 rc = __bnxt_open_nic(bp, true, true);
13298 if (rc) {
13299 bnxt_hwrm_if_change(bp, false);
13300 } else {
13301 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13302 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13303 bnxt_queue_sp_work(bp,
13304 BNXT_RESTART_ULP_SP_EVENT);
13305 }
13306 }
13307
13308 return rc;
13309 }
13310
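/* Return true while the service task or a stats read still references the
 * rings; the close path waits for this to clear before freeing resources.
 */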
13311 static bool bnxt_drv_busy(struct bnxt *bp)
13312 {
13313 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13314 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13315 }
13316
13317 static void bnxt_get_ring_stats(struct bnxt *bp,
13318 struct rtnl_link_stats64 *stats);
13319
13320 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13321 bool link_re_init)
13322 {
13323 /* Close the VF-reps before closing PF */
13324 if (BNXT_PF(bp))
13325 bnxt_vf_reps_close(bp);
13326
13327 /* Change device state to avoid TX queue wake-ups */
13328 bnxt_tx_disable(bp);
13329
13330 clear_bit(BNXT_STATE_OPEN, &bp->state);
13331 smp_mb__after_atomic();
13332 while (bnxt_drv_busy(bp))
13333 msleep(20);
13334
13335 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13336 bnxt_clear_rss_ctxs(bp);
13337 /* Flush rings and disable interrupts */
13338 bnxt_shutdown_nic(bp, irq_re_init);
13339
13340 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13341
13342 bnxt_debug_dev_exit(bp);
13343 bnxt_disable_napi(bp);
13344 timer_delete_sync(&bp->timer);
13345 bnxt_free_skbs(bp);
13346
13347 /* Save ring stats before shutdown */
13348 if (bp->bnapi && irq_re_init) {
13349 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13350 bnxt_get_ring_drv_stats(bp, &bp->ring_drv_stats_prev);
13351 }
13352 if (irq_re_init) {
13353 bnxt_free_irq(bp);
13354 bnxt_del_napi(bp);
13355 }
13356 bnxt_free_mem(bp, irq_re_init);
13357 }
13358
13359 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13360 {
13361 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13362 /* If we get here, it means firmware reset is in progress
13363 * while we are trying to close. We can safely proceed with
13364 * the close because we are holding netdev instance lock.
13365 * Some firmware messages may fail as we proceed to close.
13366 * We set the ABORT_ERR flag here so that the FW reset thread
13367 * will later abort when it gets the netdev instance lock
13368 * and sees the flag.
13369 */
13370 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13371 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13372 }
13373
13374 #ifdef CONFIG_BNXT_SRIOV
13375 if (bp->sriov_cfg) {
13376 int rc;
13377
13378 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13379 !bp->sriov_cfg,
13380 BNXT_SRIOV_CFG_WAIT_TMO);
13381 if (!rc)
13382 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13383 else if (rc < 0)
13384 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13385 }
13386 #endif
13387 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13388 }
13389
13390 static int bnxt_close(struct net_device *dev)
13391 {
13392 struct bnxt *bp = netdev_priv(dev);
13393
13394 bnxt_close_nic(bp, true, true);
13395 bnxt_hwrm_shutdown_link(bp);
13396 bnxt_hwrm_if_change(bp, false);
13397 return 0;
13398 }
13399
13400 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13401 u16 *val)
13402 {
13403 struct hwrm_port_phy_mdio_read_output *resp;
13404 struct hwrm_port_phy_mdio_read_input *req;
13405 int rc;
13406
13407 if (bp->hwrm_spec_code < 0x10a00)
13408 return -EOPNOTSUPP;
13409
13410 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13411 if (rc)
13412 return rc;
13413
13414 req->port_id = cpu_to_le16(bp->pf.port_id);
13415 req->phy_addr = phy_addr;
13416 req->reg_addr = cpu_to_le16(reg & 0x1f);
13417 if (mdio_phy_id_is_c45(phy_addr)) {
13418 req->cl45_mdio = 1;
13419 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13420 req->dev_addr = mdio_phy_id_devad(phy_addr);
13421 req->reg_addr = cpu_to_le16(reg);
13422 }
13423
13424 resp = hwrm_req_hold(bp, req);
13425 rc = hwrm_req_send(bp, req);
13426 if (!rc)
13427 *val = le16_to_cpu(resp->reg_data);
13428 hwrm_req_drop(bp, req);
13429 return rc;
13430 }
13431
13432 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13433 u16 val)
13434 {
13435 struct hwrm_port_phy_mdio_write_input *req;
13436 int rc;
13437
13438 if (bp->hwrm_spec_code < 0x10a00)
13439 return -EOPNOTSUPP;
13440
13441 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13442 if (rc)
13443 return rc;
13444
13445 req->port_id = cpu_to_le16(bp->pf.port_id);
13446 req->phy_addr = phy_addr;
13447 req->reg_addr = cpu_to_le16(reg & 0x1f);
13448 if (mdio_phy_id_is_c45(phy_addr)) {
13449 req->cl45_mdio = 1;
13450 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13451 req->dev_addr = mdio_phy_id_devad(phy_addr);
13452 req->reg_addr = cpu_to_le16(reg);
13453 }
13454 req->reg_data = cpu_to_le16(val);
13455
13456 return hwrm_req_send(bp, req);
13457 }
13458
13459 /* netdev instance lock held */
13460 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13461 {
13462 struct mii_ioctl_data *mdio = if_mii(ifr);
13463 struct bnxt *bp = netdev_priv(dev);
13464 int rc;
13465
13466 switch (cmd) {
13467 case SIOCGMIIPHY:
13468 mdio->phy_id = bp->link_info.phy_addr;
13469
13470 fallthrough;
13471 case SIOCGMIIREG: {
13472 u16 mii_regval = 0;
13473
13474 if (!netif_running(dev))
13475 return -EAGAIN;
13476
13477 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13478 &mii_regval);
13479 mdio->val_out = mii_regval;
13480 return rc;
13481 }
13482
13483 case SIOCSMIIREG:
13484 if (!netif_running(dev))
13485 return -EAGAIN;
13486
13487 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13488 mdio->val_in);
13489
13490 default:
13491 /* do nothing */
13492 break;
13493 }
13494 return -EOPNOTSUPP;
13495 }
13496
13497 static void bnxt_get_ring_stats(struct bnxt *bp,
13498 struct rtnl_link_stats64 *stats)
13499 {
13500 int i;
13501
13502 for (i = 0; i < bp->cp_nr_rings; i++) {
13503 struct bnxt_napi *bnapi = bp->bnapi[i];
13504 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13505 u64 *sw = cpr->stats.sw_stats;
13506
13507 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13508 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13509 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13510
13511 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13512 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13513 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13514
13515 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13516 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13517 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13518
13519 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13520 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13521 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13522
13523 stats->rx_missed_errors +=
13524 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13525
13526 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13527
13528 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13529
13530 stats->rx_dropped +=
13531 cpr->sw_stats->rx.rx_netpoll_discards +
13532 cpr->sw_stats->rx.rx_oom_discards;
13533 }
13534 }
13535
13536 static void bnxt_add_prev_stats(struct bnxt *bp,
13537 struct rtnl_link_stats64 *stats)
13538 {
13539 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13540
13541 stats->rx_packets += prev_stats->rx_packets;
13542 stats->tx_packets += prev_stats->tx_packets;
13543 stats->rx_bytes += prev_stats->rx_bytes;
13544 stats->tx_bytes += prev_stats->tx_bytes;
13545 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13546 stats->multicast += prev_stats->multicast;
13547 stats->rx_dropped += prev_stats->rx_dropped;
13548 stats->tx_dropped += prev_stats->tx_dropped;
13549 }
13550
13551 static void
13552 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13553 {
13554 struct bnxt *bp = netdev_priv(dev);
13555
13556 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13557 /* Make sure bnxt_close_nic() sees that we are reading stats before
13558 * we check the BNXT_STATE_OPEN flag.
13559 */
13560 smp_mb__after_atomic();
13561 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13562 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13563 *stats = bp->net_stats_prev;
13564 return;
13565 }
13566
13567 bnxt_get_ring_stats(bp, stats);
13568 bnxt_add_prev_stats(bp, stats);
13569
13570 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13571 u64 *rx = bp->port_stats.sw_stats;
13572 u64 *tx = bp->port_stats.sw_stats +
13573 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13574
13575 stats->rx_crc_errors =
13576 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13577 stats->rx_frame_errors =
13578 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13579 stats->rx_length_errors =
13580 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13581 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13582 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13583 stats->rx_errors =
13584 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13585 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13586 stats->collisions =
13587 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13588 stats->tx_fifo_errors =
13589 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13590 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13591 }
13592 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13593 }
13594
13595 static void bnxt_get_one_ring_drv_stats(struct bnxt *bp,
13596 struct bnxt_total_ring_drv_stats *stats,
13597 struct bnxt_cp_ring_info *cpr)
13598 {
13599 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13600 u64 *hw_stats = cpr->stats.sw_stats;
13601
13602 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13603 stats->rx_total_resets += sw_stats->rx.rx_resets;
13604 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13605 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13606 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13607 stats->rx_total_ring_discards +=
13608 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13609 stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13610 stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13611 stats->tx_total_resets += sw_stats->tx.tx_resets;
13612 stats->tx_total_ring_discards +=
13613 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13614 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13615 }
13616
13617 void bnxt_get_ring_drv_stats(struct bnxt *bp,
13618 struct bnxt_total_ring_drv_stats *stats)
13619 {
13620 int i;
13621
13622 for (i = 0; i < bp->cp_nr_rings; i++)
13623 bnxt_get_one_ring_drv_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13624 }
13625
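/* Copy the multicast list into the default VNIC, falling back to ALL_MCAST
 * when it exceeds BNXT_MAX_MC_ADDRS. Returns true if the cached list or
 * count changed and the RX mask needs to be reprogrammed.
 */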
13626 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask,
13627 const struct netdev_hw_addr_list *mc)
13628 {
13629 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13630 struct netdev_hw_addr *ha;
13631 u8 *haddr;
13632 int mc_count = 0;
13633 bool update = false;
13634 int off = 0;
13635
13636 netdev_hw_addr_list_for_each(ha, mc) {
13637 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13638 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13639 vnic->mc_list_count = 0;
13640 return false;
13641 }
13642 haddr = ha->addr;
13643 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13644 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13645 update = true;
13646 }
13647 off += ETH_ALEN;
13648 mc_count++;
13649 }
13650 if (mc_count)
13651 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13652
13653 if (mc_count != vnic->mc_list_count) {
13654 vnic->mc_list_count = mc_count;
13655 update = true;
13656 }
13657 return update;
13658 }
13659
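/* Return true if the netdev unicast list no longer matches the unicast
 * addresses cached in the default VNIC.
 */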
13660 static bool bnxt_uc_list_updated(struct bnxt *bp,
13661 const struct netdev_hw_addr_list *uc)
13662 {
13663 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13664 struct netdev_hw_addr *ha;
13665 int off = 0;
13666
13667 if (netdev_hw_addr_list_count(uc) != (vnic->uc_filter_count - 1))
13668 return true;
13669
13670 netdev_hw_addr_list_for_each(ha, uc) {
13671 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13672 return true;
13673
13674 off += ETH_ALEN;
13675 }
13676 return false;
13677 }
13678
13679 static void bnxt_set_rx_mode(struct net_device *dev,
13680 struct netdev_hw_addr_list *uc,
13681 struct netdev_hw_addr_list *mc)
13682 {
13683 struct bnxt *bp = netdev_priv(dev);
13684 struct bnxt_vnic_info *vnic;
13685 bool mc_update = false;
13686 bool uc_update;
13687 u32 mask;
13688
13689 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13690 return;
13691
13692 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13693 mask = vnic->rx_mask;
13694 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13695 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13696 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13697 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13698
13699 if (dev->flags & IFF_PROMISC)
13700 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13701
13702 uc_update = bnxt_uc_list_updated(bp, uc);
13703
13704 if (dev->flags & IFF_BROADCAST)
13705 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13706 if (dev->flags & IFF_ALLMULTI) {
13707 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13708 vnic->mc_list_count = 0;
13709 } else if (dev->flags & IFF_MULTICAST) {
13710 mc_update = bnxt_mc_list_updated(bp, &mask, mc);
13711 }
13712
13713 if (mask != vnic->rx_mask || uc_update || mc_update) {
13714 vnic->rx_mask = mask;
13715
13716 bnxt_cfg_rx_mode(bp, uc, uc_update);
13717 }
13718 }
13719
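/* Reprogram the unicast L2 filters and RX mask of the default VNIC. If
 * firmware rejects the multicast configuration, fall back to ALL_MCAST
 * mode.
 */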
13720 static int bnxt_cfg_rx_mode(struct bnxt *bp, struct netdev_hw_addr_list *uc,
13721 bool uc_update)
13722 {
13723 struct net_device *dev = bp->dev;
13724 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13725 struct netdev_hw_addr *ha;
13726 int i, off = 0, rc;
13727
13728 if (!uc_update)
13729 goto skip_uc;
13730
13731 for (i = 1; i < vnic->uc_filter_count; i++) {
13732 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13733
13734 bnxt_hwrm_l2_filter_free(bp, fltr);
13735 bnxt_del_l2_filter(bp, fltr);
13736 }
13737
13738 vnic->uc_filter_count = 1;
13739
13740 netif_addr_lock_bh(dev);
13741 if (netdev_hw_addr_list_count(uc) > (BNXT_MAX_UC_ADDRS - 1)) {
13742 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13743 } else {
13744 netdev_hw_addr_list_for_each(ha, uc) {
13745 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13746 off += ETH_ALEN;
13747 vnic->uc_filter_count++;
13748 }
13749 }
13750 netif_addr_unlock_bh(dev);
13751
13752 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13753 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13754 if (rc) {
13755 if (BNXT_VF(bp) && rc == -ENODEV) {
13756 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13757 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13758 else
13759 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13760 rc = 0;
13761 } else {
13762 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13763 }
13764 vnic->uc_filter_count = i;
13765 return rc;
13766 }
13767 }
13768 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13769 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13770
13771 skip_uc:
13772 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13773 !bnxt_promisc_ok(bp))
13774 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13775 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13776 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13777 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13778 rc);
13779 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13780 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13781 vnic->mc_list_count = 0;
13782 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13783 }
13784 if (rc)
13785 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13786 rc);
13787
13788 return rc;
13789 }
13790
13791 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13792 {
13793 #ifdef CONFIG_BNXT_SRIOV
13794 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13795 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13796
13797 /* No minimum rings were provisioned by the PF. Don't
13798 * reserve rings by default when device is down.
13799 */
13800 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13801 return true;
13802
13803 if (!netif_running(bp->dev))
13804 return false;
13805 }
13806 #endif
13807 return true;
13808 }
13809
13810 /* If the chip and firmware support RFS */
13811 static bool bnxt_rfs_supported(struct bnxt *bp)
13812 {
13813 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13814 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13815 return true;
13816 return false;
13817 }
13818 /* 212 firmware is broken for aRFS */
13819 if (BNXT_FW_MAJ(bp) == 212)
13820 return false;
13821 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13822 return true;
13823 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13824 return true;
13825 return false;
13826 }
13827
13828 /* If runtime conditions support RFS */
13829 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13830 {
13831 struct bnxt_hw_rings hwr = {0};
13832 int max_vnics, max_rss_ctxs;
13833
13834 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13835 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13836 return bnxt_rfs_supported(bp);
13837
13838 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13839 return false;
13840
13841 hwr.grp = bp->rx_nr_rings;
13842 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13843 if (new_rss_ctx)
13844 hwr.vnic++;
13845 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13846 max_vnics = bnxt_get_max_func_vnics(bp);
13847 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13848
13849 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13850 if (bp->rx_nr_rings > 1)
13851 netdev_warn(bp->dev,
13852 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13853 min(max_rss_ctxs - 1, max_vnics - 1));
13854 return false;
13855 }
13856
13857 if (!BNXT_NEW_RM(bp))
13858 return true;
13859
13860 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13861 * issue that will mess up the default VNIC if we reduce the
13862 * reservations.
13863 */
13864 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13865 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13866 return true;
13867
13868 bnxt_hwrm_reserve_rings(bp, &hwr);
13869 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13870 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13871 return true;
13872
13873 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13874 hwr.vnic = 1;
13875 hwr.rss_ctx = 0;
13876 bnxt_hwrm_reserve_rings(bp, &hwr);
13877 return false;
13878 }
13879
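/* Resolve feature dependencies before they are committed: NTUPLE needs
 * spare VNIC/RSS resources, LRO and hardware GRO are mutually exclusive and
 * are cleared when aggregation rings are disabled or XDP is attached, and
 * CTAG/STAG RX VLAN acceleration must be toggled together.
 */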
13880 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13881 netdev_features_t features)
13882 {
13883 struct bnxt *bp = netdev_priv(dev);
13884 netdev_features_t vlan_features;
13885
13886 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13887 features &= ~NETIF_F_NTUPLE;
13888
13889 if ((features & NETIF_F_GSO_UDP_L4) &&
13890 !(bp->flags & BNXT_FLAG_UDP_GSO_CAP) &&
13891 bp->tx_ring_size < 2 * BNXT_SW_USO_MAX_DESCS)
13892 features &= ~NETIF_F_GSO_UDP_L4;
13893
13894 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13895 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13896
13897 if (!(features & NETIF_F_GRO))
13898 features &= ~NETIF_F_GRO_HW;
13899
13900 if (features & NETIF_F_GRO_HW)
13901 features &= ~NETIF_F_LRO;
13902
13903 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13904 * turned on or off together.
13905 */
13906 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13907 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13908 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13909 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13910 else if (vlan_features)
13911 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13912 }
13913 #ifdef CONFIG_BNXT_SRIOV
13914 if (BNXT_VF(bp) && bp->vf.vlan)
13915 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13916 #endif
13917 return features;
13918 }
13919
13920 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13921 bool link_re_init, u32 flags, bool update_tpa)
13922 {
13923 bnxt_close_nic(bp, irq_re_init, link_re_init);
13924 bp->flags = flags;
13925 if (update_tpa)
13926 bnxt_set_ring_params(bp);
13927 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13928 }
13929
13930 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13931 {
13932 bool update_tpa = false, update_ntuple = false;
13933 struct bnxt *bp = netdev_priv(dev);
13934 u32 flags = bp->flags;
13935 u32 changes;
13936 int rc = 0;
13937 bool re_init = false;
13938
13939 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
13940 bnxt_min_tx_desc_cnt(bp, features));
13941
13942 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13943 if (features & NETIF_F_GRO_HW)
13944 flags |= BNXT_FLAG_GRO;
13945 else if (features & NETIF_F_LRO)
13946 flags |= BNXT_FLAG_LRO;
13947
13948 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13949 flags &= ~BNXT_FLAG_TPA;
13950
13951 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13952 flags |= BNXT_FLAG_STRIP_VLAN;
13953
13954 if (features & NETIF_F_NTUPLE)
13955 flags |= BNXT_FLAG_RFS;
13956 else
13957 bnxt_clear_usr_fltrs(bp, true);
13958
13959 changes = flags ^ bp->flags;
13960 if (changes & BNXT_FLAG_TPA) {
13961 update_tpa = true;
13962 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13963 (flags & BNXT_FLAG_TPA) == 0 ||
13964 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13965 re_init = true;
13966 }
13967
13968 if (changes & ~BNXT_FLAG_TPA)
13969 re_init = true;
13970
13971 if (changes & BNXT_FLAG_RFS)
13972 update_ntuple = true;
13973
13974 if (flags != bp->flags) {
13975 u32 old_flags = bp->flags;
13976
13977 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13978 bp->flags = flags;
13979 if (update_tpa)
13980 bnxt_set_ring_params(bp);
13981 return rc;
13982 }
13983
13984 if (update_ntuple)
13985 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13986
13987 if (re_init)
13988 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13989
13990 if (update_tpa) {
13991 bp->flags = flags;
13992 rc = bnxt_set_tpa(bp,
13993 (flags & BNXT_FLAG_TPA) ?
13994 true : false);
13995 if (rc)
13996 bp->flags = old_flags;
13997 }
13998 }
13999 return rc;
14000 }
14001
14002 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
14003 u8 **nextp)
14004 {
14005 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
14006 int hdr_count = 0;
14007 u8 *nexthdr;
14008 int start;
14009
14010 /* Check that there are at most 2 IPv6 extension headers, no
14011 * fragment header, and each is <= 64 bytes.
14012 */
14013 start = nw_off + sizeof(*ip6h);
14014 nexthdr = &ip6h->nexthdr;
14015 while (ipv6_ext_hdr(*nexthdr)) {
14016 struct ipv6_opt_hdr *hp;
14017 int hdrlen;
14018
14019 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
14020 *nexthdr == NEXTHDR_FRAGMENT)
14021 return false;
14022 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
14023 skb_headlen(skb), NULL);
14024 if (!hp)
14025 return false;
14026 if (*nexthdr == NEXTHDR_AUTH)
14027 hdrlen = ipv6_authlen(hp);
14028 else
14029 hdrlen = ipv6_optlen(hp);
14030
14031 if (hdrlen > 64)
14032 return false;
14033
14034 hdr_count++;
14035 nexthdr = &hp->nexthdr;
14036 start += hdrlen;
14037 }
14038 if (nextp) {
14039 /* Caller will check inner protocol */
14040 if (skb->encapsulation) {
14041 *nextp = nexthdr;
14042 return true;
14043 }
14044 *nextp = NULL;
14045 }
14046 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
14047 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
14048 }
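/* The 64-byte bound above is applied to the on-the-wire encoding of each
 * extension header: hdrlen counts 8-octet units (excluding the first) for
 * generic options, but 4-octet units plus two for AH.  Stand-alone
 * restatement of that arithmetic (mirrors ipv6_optlen()/ipv6_authlen()):
 */
static int ex_opt_len(unsigned char hdrlen)
{
	return (hdrlen + 1) << 3;	/* generic option header, in bytes */
}

static int ex_auth_len(unsigned char hdrlen)
{
	return (hdrlen + 2) << 2;	/* Authentication Header, in bytes */
}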
14049
14050 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
14051 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
14052 {
14053 struct udphdr *uh = udp_hdr(skb);
14054 __be16 udp_port = uh->dest;
14055
14056 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
14057 udp_port != bp->vxlan_gpe_port)
14058 return false;
14059 if (skb->inner_protocol == htons(ETH_P_TEB)) {
14060 struct ethhdr *eh = inner_eth_hdr(skb);
14061
14062 switch (eh->h_proto) {
14063 case htons(ETH_P_IP):
14064 return true;
14065 case htons(ETH_P_IPV6):
14066 return bnxt_exthdr_check(bp, skb,
14067 skb_inner_network_offset(skb),
14068 NULL);
14069 }
14070 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
14071 return true;
14072 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
14073 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
14074 NULL);
14075 }
14076 return false;
14077 }
14078
14079 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
14080 {
14081 switch (l4_proto) {
14082 case IPPROTO_UDP:
14083 return bnxt_udp_tunl_check(bp, skb);
14084 case IPPROTO_IPIP:
14085 return true;
14086 case IPPROTO_GRE: {
14087 switch (skb->inner_protocol) {
14088 default:
14089 return false;
14090 case htons(ETH_P_IP):
14091 return true;
14092 case htons(ETH_P_IPV6):
14093 fallthrough;
14094 }
14095 }
14096 case IPPROTO_IPV6:
14097 /* Check ext headers of inner ipv6 */
14098 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
14099 NULL);
14100 }
14101 return false;
14102 }
14103
14104 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
14105 struct net_device *dev,
14106 netdev_features_t features)
14107 {
14108 struct bnxt *bp = netdev_priv(dev);
14109 u8 *l4_proto;
14110
14111 features = vlan_features_check(skb, features);
14112 switch (vlan_get_protocol(skb)) {
14113 case htons(ETH_P_IP):
14114 if (!skb->encapsulation)
14115 return features;
14116 l4_proto = &ip_hdr(skb)->protocol;
14117 if (bnxt_tunl_check(bp, skb, *l4_proto))
14118 return features;
14119 break;
14120 case htons(ETH_P_IPV6):
14121 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14122 &l4_proto))
14123 break;
14124 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14125 return features;
14126 break;
14127 }
14128 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14129 }
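/* Per-packet fallback: clearing NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK from
 * the returned mask makes the stack checksum and segment this one skb in
 * software; the device-wide feature set is left untouched.
 */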
14130
14131 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14132 u32 *reg_buf)
14133 {
14134 struct hwrm_dbg_read_direct_output *resp;
14135 struct hwrm_dbg_read_direct_input *req;
14136 __le32 *dbg_reg_buf;
14137 dma_addr_t mapping;
14138 int rc, i;
14139
14140 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14141 if (rc)
14142 return rc;
14143
14144 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14145 &mapping);
14146 if (!dbg_reg_buf) {
14147 rc = -ENOMEM;
14148 goto dbg_rd_reg_exit;
14149 }
14150
14151 req->host_dest_addr = cpu_to_le64(mapping);
14152
14153 resp = hwrm_req_hold(bp, req);
14154 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14155 req->read_len32 = cpu_to_le32(num_words);
14156
14157 rc = hwrm_req_send(bp, req);
14158 if (rc || resp->error_code) {
14159 rc = -EIO;
14160 goto dbg_rd_reg_exit;
14161 }
14162 for (i = 0; i < num_words; i++)
14163 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14164
14165 dbg_rd_reg_exit:
14166 hwrm_req_drop(bp, req);
14167 return rc;
14168 }
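/* The call pattern above is the standard HWRM request lifecycle used
 * throughout this file: hwrm_req_init() builds the request,
 * hwrm_req_dma_slice() carves out a DMA buffer tied to the request's
 * lifetime, hwrm_req_hold() keeps the response buffer valid across
 * hwrm_req_send(), and hwrm_req_drop() releases everything.  Response
 * fields are little-endian, hence the le32_to_cpu() copy-out loop.
 */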
14169
14170 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14171 u32 ring_id, u32 *prod, u32 *cons)
14172 {
14173 struct hwrm_dbg_ring_info_get_output *resp;
14174 struct hwrm_dbg_ring_info_get_input *req;
14175 int rc;
14176
14177 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14178 if (rc)
14179 return rc;
14180
14181 req->ring_type = ring_type;
14182 req->fw_ring_id = cpu_to_le32(ring_id);
14183 resp = hwrm_req_hold(bp, req);
14184 rc = hwrm_req_send(bp, req);
14185 if (!rc) {
14186 *prod = le32_to_cpu(resp->producer_index);
14187 *cons = le32_to_cpu(resp->consumer_index);
14188 }
14189 hwrm_req_drop(bp, req);
14190 return rc;
14191 }
14192
14193 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14194 {
14195 struct bnxt_tx_ring_info *txr;
14196 int i = bnapi->index, j;
14197
14198 bnxt_for_each_napi_tx(j, bnapi, txr)
14199 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14200 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14201 txr->tx_cons);
14202 }
14203
14204 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14205 {
14206 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14207 int i = bnapi->index;
14208
14209 if (!rxr)
14210 return;
14211
14212 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14213 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14214 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14215 rxr->rx_sw_agg_prod);
14216 }
14217
14218 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14219 {
14220 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14221 int i = bnapi->index, j;
14222
14223 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14224 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14225 for (j = 0; j < cpr->cp_ring_count; j++) {
14226 cpr2 = &cpr->cp_ring_arr[j];
14227 if (!cpr2->bnapi)
14228 continue;
14229 netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14230 i, j, cpr2->cp_ring_struct.fw_ring_id,
14231 cpr2->cp_raw_cons);
14232 }
14233 }
14234
14235 static void bnxt_dbg_dump_states(struct bnxt *bp)
14236 {
14237 int i;
14238 struct bnxt_napi *bnapi;
14239
14240 for (i = 0; i < bp->cp_nr_rings; i++) {
14241 bnapi = bp->bnapi[i];
14242 if (netif_msg_drv(bp)) {
14243 bnxt_dump_tx_sw_state(bnapi);
14244 bnxt_dump_rx_sw_state(bnapi);
14245 bnxt_dump_cp_sw_state(bnapi);
14246 }
14247 }
14248 }
14249
14250 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14251 {
14252 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14253 struct hwrm_ring_reset_input *req;
14254 struct bnxt_napi *bnapi = rxr->bnapi;
14255 struct bnxt_cp_ring_info *cpr;
14256 u16 cp_ring_id;
14257 int rc;
14258
14259 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14260 if (rc)
14261 return rc;
14262
14263 cpr = &bnapi->cp_ring;
14264 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14265 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14266 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14267 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14268 return hwrm_req_send_silent(bp, req);
14269 }
14270
14271 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14272 {
14273 if (!silent)
14274 bnxt_dbg_dump_states(bp);
14275 if (netif_running(bp->dev)) {
14276 bnxt_close_nic(bp, !silent, false);
14277 bnxt_open_nic(bp, !silent, false);
14278 }
14279 }
14280
14281 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14282 {
14283 struct bnxt *bp = netdev_priv(dev);
14284
14285 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14286 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14287 }
14288
14289 static void bnxt_fw_health_check(struct bnxt *bp)
14290 {
14291 struct bnxt_fw_health *fw_health = bp->fw_health;
14292 struct pci_dev *pdev = bp->pdev;
14293 u32 val;
14294
14295 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14296 return;
14297
14298 /* Make sure it is enabled before checking the tmr_counter. */
14299 smp_rmb();
14300 if (fw_health->tmr_counter) {
14301 fw_health->tmr_counter--;
14302 return;
14303 }
14304
14305 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14306 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14307 fw_health->arrests++;
14308 goto fw_reset;
14309 }
14310
14311 fw_health->last_fw_heartbeat = val;
14312
14313 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14314 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14315 fw_health->discoveries++;
14316 goto fw_reset;
14317 }
14318
14319 fw_health->tmr_counter = fw_health->tmr_multiplier;
14320 return;
14321
14322 fw_reset:
14323 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14324 }
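/* Sketch of the watchdog pattern above, not driver code: poll from a timer,
 * divide the poll rate with a countdown, and declare the firmware stalled
 * when the heartbeat register stops advancing between samples.  All names
 * here are hypothetical:
 */
struct ex_health {
	unsigned int tmr_counter;	/* polls left until the next sample */
	unsigned int tmr_multiplier;	/* countdown reload value */
	unsigned int last_heartbeat;	/* previous heartbeat sample */
};

static int ex_health_poll(struct ex_health *h, unsigned int heartbeat)
{
	if (h->tmr_counter) {
		h->tmr_counter--;
		return 0;		/* not time to sample yet */
	}
	if (heartbeat == h->last_heartbeat)
		return -1;		/* stalled: caller starts recovery */
	h->last_heartbeat = heartbeat;
	h->tmr_counter = h->tmr_multiplier;
	return 0;
}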
14325
14326 static void bnxt_timer(struct timer_list *t)
14327 {
14328 struct bnxt *bp = timer_container_of(bp, t, timer);
14329 struct net_device *dev = bp->dev;
14330
14331 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14332 return;
14333
14334 if (atomic_read(&bp->intr_sem) != 0)
14335 goto bnxt_restart_timer;
14336
14337 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14338 bnxt_fw_health_check(bp);
14339
14340 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14341 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14342
14343 if (bnxt_tc_flower_enabled(bp))
14344 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14345
14346 #ifdef CONFIG_RFS_ACCEL
14347 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14348 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14349 #endif /*CONFIG_RFS_ACCEL*/
14350
14351 if (bp->link_info.phy_retry) {
14352 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14353 bp->link_info.phy_retry = false;
14354 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14355 } else {
14356 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14357 }
14358 }
14359
14360 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14361 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14362
14363 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14364 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14365
14366 bnxt_restart_timer:
14367 mod_timer(&bp->timer, jiffies + bp->current_interval);
14368 }
14369
14370 static void bnxt_lock_sp(struct bnxt *bp)
14371 {
14372 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14373 * set. If the device is being closed, bnxt_close() may be holding
14374 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14375 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14376 * instance lock.
14377 */
14378 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14379 netdev_lock(bp->dev);
14380 }
14381
14382 static void bnxt_unlock_sp(struct bnxt *bp)
14383 {
14384 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14385 netdev_unlock(bp->dev);
14386 }
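/* Why the flag dance in bnxt_lock_sp()/bnxt_unlock_sp() matters: the close
 * path takes the netdev instance lock and then waits for
 * BNXT_STATE_IN_SP_TASK to clear, so if the sp task kept the flag set while
 * blocking on the same lock, neither side could make progress.  Clearing
 * the flag first breaks the cycle:
 *
 *   sp task                      close path
 *   -------                      ----------
 *   clear IN_SP_TASK             lock netdev
 *   lock netdev (may block)      wait for !IN_SP_TASK  (now succeeds)
 */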
14387
14388 /* Only called from bnxt_sp_task() */
14389 static void bnxt_reset(struct bnxt *bp, bool silent)
14390 {
14391 bnxt_lock_sp(bp);
14392 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14393 bnxt_reset_task(bp, silent);
14394 bnxt_unlock_sp(bp);
14395 }
14396
14397 /* Only called from bnxt_sp_task() */
14398 static void bnxt_rx_ring_reset(struct bnxt *bp)
14399 {
14400 int i;
14401
14402 bnxt_lock_sp(bp);
14403 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14404 bnxt_unlock_sp(bp);
14405 return;
14406 }
14407 /* Disable and flush TPA before resetting the RX ring */
14408 if (bp->flags & BNXT_FLAG_TPA)
14409 bnxt_set_tpa(bp, false);
14410 for (i = 0; i < bp->rx_nr_rings; i++) {
14411 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14412 struct bnxt_cp_ring_info *cpr;
14413 int rc;
14414
14415 if (!rxr->bnapi->in_reset)
14416 continue;
14417
14418 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14419 if (rc) {
14420 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14421 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14422 else
14423 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14424 rc);
14425 bnxt_reset_task(bp, true);
14426 break;
14427 }
14428 bnxt_free_one_rx_ring_skbs(bp, rxr);
14429 rxr->rx_prod = 0;
14430 rxr->rx_agg_prod = 0;
14431 rxr->rx_sw_agg_prod = 0;
14432 rxr->rx_next_cons = 0;
14433 rxr->bnapi->in_reset = false;
14434 bnxt_alloc_one_rx_ring(bp, i);
14435 cpr = &rxr->bnapi->cp_ring;
14436 cpr->sw_stats->rx.rx_resets++;
14437 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14438 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14439 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14440 }
14441 if (bp->flags & BNXT_FLAG_TPA)
14442 bnxt_set_tpa(bp, true);
14443 bnxt_unlock_sp(bp);
14444 }
14445
14446 static void bnxt_fw_fatal_close(struct bnxt *bp)
14447 {
14448 bnxt_tx_disable(bp);
14449 bnxt_disable_napi(bp);
14450 bnxt_disable_int_sync(bp);
14451 bnxt_free_irq(bp);
14452 bnxt_clear_int_mode(bp);
14453 pci_disable_device(bp->pdev);
14454 }
14455
14456 static void bnxt_fw_reset_close(struct bnxt *bp)
14457 {
14458 /* When firmware is in fatal state, quiesce device and disable
14459 * bus master to prevent any potential bad DMAs before freeing
14460 * kernel memory.
14461 */
14462 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14463 u16 val = 0;
14464
14465 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14466 if (val == 0xffff)
14467 bp->fw_reset_min_dsecs = 0;
14468 bnxt_fw_fatal_close(bp);
14469 }
14470 __bnxt_close_nic(bp, true, false);
14471 bnxt_vf_reps_free(bp);
14472 bnxt_clear_int_mode(bp);
14473 bnxt_hwrm_func_drv_unrgtr(bp);
14474 if (pci_is_enabled(bp->pdev))
14475 pci_disable_device(bp->pdev);
14476 bnxt_free_ctx_mem(bp, false);
14477 }
14478
14479 static bool is_bnxt_fw_ok(struct bnxt *bp)
14480 {
14481 struct bnxt_fw_health *fw_health = bp->fw_health;
14482 bool no_heartbeat = false, has_reset = false;
14483 u32 val;
14484
14485 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14486 if (val == fw_health->last_fw_heartbeat)
14487 no_heartbeat = true;
14488
14489 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14490 if (val != fw_health->last_fw_reset_cnt)
14491 has_reset = true;
14492
14493 if (!no_heartbeat && has_reset)
14494 return true;
14495
14496 return false;
14497 }
14498
14499 /* netdev instance lock is acquired before calling this function */
14500 static void bnxt_force_fw_reset(struct bnxt *bp)
14501 {
14502 struct bnxt_fw_health *fw_health = bp->fw_health;
14503 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14504 u32 wait_dsecs;
14505
14506 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14507 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14508 return;
14509
14510 /* we have to serialize with bnxt_refclk_read() */
14511 if (ptp) {
14512 unsigned long flags;
14513
14514 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14515 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14516 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14517 } else {
14518 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14519 }
14520 bnxt_fw_reset_close(bp);
14521 wait_dsecs = fw_health->master_func_wait_dsecs;
14522 if (fw_health->primary) {
14523 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14524 wait_dsecs = 0;
14525 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14526 } else {
14527 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14528 wait_dsecs = fw_health->normal_func_wait_dsecs;
14529 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14530 }
14531
14532 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14533 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14534 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14535 }
14536
14537 void bnxt_fw_exception(struct bnxt *bp)
14538 {
14539 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14540 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14541 bnxt_ulp_stop(bp);
14542 bnxt_lock_sp(bp);
14543 bnxt_force_fw_reset(bp);
14544 bnxt_unlock_sp(bp);
14545 }
14546
14547 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14548 * < 0 on error.
14549 */
14550 static int bnxt_get_registered_vfs(struct bnxt *bp)
14551 {
14552 #ifdef CONFIG_BNXT_SRIOV
14553 int rc;
14554
14555 if (!BNXT_PF(bp))
14556 return 0;
14557
14558 rc = bnxt_hwrm_func_qcfg(bp);
14559 if (rc) {
14560 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14561 return rc;
14562 }
14563 if (bp->pf.registered_vfs)
14564 return bp->pf.registered_vfs;
14565 if (bp->sriov_cfg)
14566 return 1;
14567 #endif
14568 return 0;
14569 }
14570
14571 void bnxt_fw_reset(struct bnxt *bp)
14572 {
14573 bnxt_ulp_stop(bp);
14574 bnxt_lock_sp(bp);
14575 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14576 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14577 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14578 int n = 0, tmo;
14579
14580 /* we have to serialize with bnxt_refclk_read() */
14581 if (ptp) {
14582 unsigned long flags;
14583
14584 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14585 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14586 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14587 } else {
14588 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14589 }
14590 if (bp->pf.active_vfs &&
14591 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14592 n = bnxt_get_registered_vfs(bp);
14593 if (n < 0) {
14594 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14595 n);
14596 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14597 netif_close(bp->dev);
14598 goto fw_reset_exit;
14599 } else if (n > 0) {
14600 u16 vf_tmo_dsecs = n * 10;
14601
14602 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14603 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14604 bp->fw_reset_state =
14605 BNXT_FW_RESET_STATE_POLL_VF;
14606 bnxt_queue_fw_reset_work(bp, HZ / 10);
14607 goto fw_reset_exit;
14608 }
14609 bnxt_fw_reset_close(bp);
14610 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14611 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14612 tmo = HZ / 10;
14613 } else {
14614 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14615 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14616 }
14617 bnxt_queue_fw_reset_work(bp, tmo);
14618 }
14619 fw_reset_exit:
14620 bnxt_unlock_sp(bp);
14621 }
14622
14623 static void bnxt_chk_missed_irq(struct bnxt *bp)
14624 {
14625 int i;
14626
14627 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14628 return;
14629
14630 for (i = 0; i < bp->cp_nr_rings; i++) {
14631 struct bnxt_napi *bnapi = bp->bnapi[i];
14632 struct bnxt_cp_ring_info *cpr;
14633 u32 fw_ring_id;
14634 int j;
14635
14636 if (!bnapi)
14637 continue;
14638
14639 cpr = &bnapi->cp_ring;
14640 for (j = 0; j < cpr->cp_ring_count; j++) {
14641 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14642 u32 val[2];
14643
14644 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14645 continue;
14646
14647 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14648 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14649 continue;
14650 }
14651 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14652 bnxt_dbg_hwrm_ring_info_get(bp,
14653 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14654 fw_ring_id, &val[0], &val[1]);
14655 cpr->sw_stats->cmn.missed_irqs++;
14656 }
14657 }
14658 }
14659
14660 static void bnxt_cfg_ntp_filters(struct bnxt *);
14661
14662 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14663 {
14664 struct bnxt_link_info *link_info = &bp->link_info;
14665
14666 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14667 link_info->autoneg = BNXT_AUTONEG_SPEED;
14668 if (bp->hwrm_spec_code >= 0x10201) {
14669 if (link_info->auto_pause_setting &
14670 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14671 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14672 } else {
14673 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14674 }
14675 bnxt_set_auto_speed(link_info);
14676 } else {
14677 bnxt_set_force_speed(link_info);
14678 link_info->req_duplex = link_info->duplex_setting;
14679 }
14680 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14681 link_info->req_flow_ctrl =
14682 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14683 else
14684 link_info->req_flow_ctrl = link_info->force_pause_setting;
14685 }
14686
14687 static void bnxt_fw_echo_reply(struct bnxt *bp)
14688 {
14689 struct bnxt_fw_health *fw_health = bp->fw_health;
14690 struct hwrm_func_echo_response_input *req;
14691 int rc;
14692
14693 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14694 if (rc)
14695 return;
14696 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14697 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14698 hwrm_req_send(bp, req);
14699 }
14700
14701 static void bnxt_ulp_restart(struct bnxt *bp)
14702 {
14703 bnxt_ulp_stop(bp);
14704 bnxt_ulp_start(bp);
14705 }
14706
14707 static void bnxt_sp_task(struct work_struct *work)
14708 {
14709 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14710 struct net_device *dev = bp->dev;
14711
14712 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14713 smp_mb__after_atomic();
14714 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14715 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14716 return;
14717 }
14718
14719 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14720 bnxt_ulp_restart(bp);
14721 bnxt_reenable_sriov(bp);
14722 }
14723
14724 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14725 bnxt_cfg_ntp_filters(bp);
14726 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14727 bnxt_hwrm_exec_fwd_req(bp);
14728 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14729 netdev_info(bp->dev, "Received PF driver unload event!\n");
14730 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14731 bnxt_hwrm_port_qstats(bp, 0);
14732 bnxt_hwrm_port_qstats_ext(bp, 0);
14733 bnxt_accumulate_all_stats(bp);
14734 }
14735
14736 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14737 int rc;
14738
14739 mutex_lock(&bp->link_lock);
14740 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14741 &bp->sp_event))
14742 bnxt_hwrm_phy_qcaps(bp);
14743
14744 rc = bnxt_update_link(bp, true);
14745 if (rc)
14746 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14747 rc);
14748
14749 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14750 &bp->sp_event))
14751 bnxt_init_ethtool_link_settings(bp);
14752 mutex_unlock(&bp->link_lock);
14753 }
14754 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14755 int rc;
14756
14757 mutex_lock(&bp->link_lock);
14758 rc = bnxt_update_phy_setting(bp);
14759 mutex_unlock(&bp->link_lock);
14760 if (rc) {
14761 netdev_warn(bp->dev, "update phy settings retry failed\n");
14762 } else {
14763 bp->link_info.phy_retry = false;
14764 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14765 }
14766 }
14767 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14768 mutex_lock(&bp->link_lock);
14769 bnxt_get_port_module_status(bp);
14770 mutex_unlock(&bp->link_lock);
14771 }
14772
14773 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14774 bnxt_tc_flow_stats_work(bp);
14775
14776 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14777 bnxt_chk_missed_irq(bp);
14778
14779 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14780 bnxt_fw_echo_reply(bp);
14781
14782 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14783 bnxt_hwmon_notify_event(bp);
14784
14785 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14786 * must be the last functions to be called before exiting.
14787 */
14788 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) {
14789 bnxt_lock_sp(bp);
14790 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14791 bnxt_cfg_rx_mode(bp, &dev->uc, true);
14792 bnxt_unlock_sp(bp);
14793 }
14794
14795 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14796 bnxt_reset(bp, false);
14797
14798 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14799 bnxt_reset(bp, true);
14800
14801 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14802 bnxt_rx_ring_reset(bp);
14803
14804 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14805 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14806 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14807 bnxt_devlink_health_fw_report(bp);
14808 else
14809 bnxt_fw_reset(bp);
14810 }
14811
14812 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14813 if (!is_bnxt_fw_ok(bp))
14814 bnxt_devlink_health_fw_report(bp);
14815 }
14816
14817 smp_mb__before_atomic();
14818 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14819 }
14820
14821 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14822 int *max_cp);
14823
14824 /* Under netdev instance lock */
14825 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14826 int tx_xdp)
14827 {
14828 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14829 struct bnxt_hw_rings hwr = {0};
14830 int rx_rings = rx;
14831 int rc;
14832
14833 if (tcs)
14834 tx_sets = tcs;
14835
14836 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14837
14838 if (max_rx < rx_rings)
14839 return -ENOMEM;
14840
14841 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14842 rx_rings <<= 1;
14843
14844 hwr.rx = rx_rings;
14845 hwr.tx = tx * tx_sets + tx_xdp;
14846 if (max_tx < hwr.tx)
14847 return -ENOMEM;
14848
14849 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14850
14851 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14852 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14853 if (max_cp < hwr.cp)
14854 return -ENOMEM;
14855 hwr.stat = hwr.cp;
14856 if (BNXT_NEW_RM(bp)) {
14857 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14858 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14859 hwr.grp = rx;
14860 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14861 }
14862 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14863 hwr.cp_p5 = hwr.tx + rx;
14864 rc = bnxt_hwrm_check_rings(bp, &hwr);
14865 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14866 if (!bnxt_ulp_registered(bp->edev[BNXT_AUXDEV_RDMA])) {
14867 hwr.cp += bnxt_get_ulp_msix_num(bp);
14868 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14869 }
14870 if (hwr.cp > bp->total_irqs) {
14871 int total_msix = bnxt_change_msix(bp, hwr.cp);
14872
14873 if (total_msix < hwr.cp) {
14874 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14875 hwr.cp, total_msix);
14876 rc = -ENOSPC;
14877 }
14878 }
14879 }
14880 return rc;
14881 }
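/* Worked example of the accounting above: with aggregation rings enabled,
 * each RX queue consumes two hardware RX rings (rx_rings <<= 1), and with
 * tcs = 3 and tx = 8, hwr.tx = 8 * 3 + tx_xdp.  Shared mode then needs
 * max(tx_cp, rx) completion rings instead of tx_cp + rx.
 */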
14882
14883 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14884 {
14885 if (bp->bar2) {
14886 pci_iounmap(pdev, bp->bar2);
14887 bp->bar2 = NULL;
14888 }
14889
14890 if (bp->bar1) {
14891 pci_iounmap(pdev, bp->bar1);
14892 bp->bar1 = NULL;
14893 }
14894
14895 if (bp->bar0) {
14896 pci_iounmap(pdev, bp->bar0);
14897 bp->bar0 = NULL;
14898 }
14899 }
14900
14901 static void bnxt_cleanup_pci(struct bnxt *bp)
14902 {
14903 bnxt_unmap_bars(bp, bp->pdev);
14904 pci_release_regions(bp->pdev);
14905 if (pci_is_enabled(bp->pdev))
14906 pci_disable_device(bp->pdev);
14907 }
14908
14909 static void bnxt_init_dflt_coal(struct bnxt *bp)
14910 {
14911 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14912 struct bnxt_coal *coal;
14913 u16 flags = 0;
14914
14915 if (coal_cap->cmpl_params &
14916 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14917 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14918
14919 /* Tick values in microseconds.
14920 * 1 coal_buf x bufs_per_record = 1 completion record.
14921 */
14922 coal = &bp->rx_coal;
14923 coal->coal_ticks = 10;
14924 coal->coal_bufs = 30;
14925 coal->coal_ticks_irq = 1;
14926 coal->coal_bufs_irq = 2;
14927 coal->idle_thresh = 50;
14928 coal->bufs_per_record = 2;
14929 coal->budget = 64; /* NAPI budget */
14930 coal->flags = flags;
14931
14932 coal = &bp->tx_coal;
14933 coal->coal_ticks = 28;
14934 coal->coal_bufs = 30;
14935 coal->coal_ticks_irq = 2;
14936 coal->coal_bufs_irq = 2;
14937 coal->bufs_per_record = 1;
14938 coal->flags = flags;
14939
14940 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14941 }
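/* A rough reading of the RX defaults above, using the comment's formula
 * (one completion record per bufs_per_record coalesced buffers):
 * coal_bufs = 30 with bufs_per_record = 2 is about 15 completion records,
 * so an interrupt fires after ~10 usec or ~15 completions, whichever comes
 * first; the tighter *_irq values apply when work was already pending
 * during the previous interrupt.  Exact semantics are firmware-defined.
 */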
14942
14943 /* FW that pre-reserves 1 VNIC per function */
14944 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14945 {
14946 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14947
14948 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14949 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14950 return true;
14951 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14952 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14953 return true;
14954 return false;
14955 }
14956
14957 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14958 {
14959 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14960 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14961 int rc;
14962
14963 bp->max_pfcwd_tmo_ms = 0;
14964 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14965 if (rc)
14966 return;
14967 resp = hwrm_req_hold(bp, req);
14968 rc = hwrm_req_send_silent(bp, req);
14969 if (!rc)
14970 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14971 hwrm_req_drop(bp, req);
14972 }
14973
14974 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14975 {
14976 int rc;
14977
14978 bp->fw_cap = 0;
14979 rc = bnxt_hwrm_ver_get(bp);
14980 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14981 * so wait before continuing with recovery.
14982 */
14983 if (rc)
14984 msleep(100);
14985 bnxt_try_map_fw_health_reg(bp);
14986 if (rc) {
14987 rc = bnxt_try_recover_fw(bp);
14988 if (rc)
14989 return rc;
14990 rc = bnxt_hwrm_ver_get(bp);
14991 if (rc)
14992 return rc;
14993 }
14994
14995 bnxt_nvm_cfg_ver_get(bp);
14996
14997 rc = bnxt_hwrm_func_reset(bp);
14998 if (rc)
14999 return -ENODEV;
15000
15001 bnxt_hwrm_fw_set_time(bp);
15002 return 0;
15003 }
15004
15005 static int bnxt_fw_init_one_p2(struct bnxt *bp)
15006 {
15007 int rc;
15008
15009 /* Get the MAX capabilities for this function */
15010 rc = bnxt_hwrm_func_qcaps(bp);
15011 if (rc) {
15012 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
15013 rc);
15014 return -ENODEV;
15015 }
15016
15017 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
15018 if (rc)
15019 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
15020 rc);
15021
15022 if (bnxt_alloc_fw_health(bp)) {
15023 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
15024 } else {
15025 rc = bnxt_hwrm_error_recovery_qcfg(bp);
15026 if (rc)
15027 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
15028 rc);
15029 }
15030
15031 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
15032 if (rc)
15033 return -ENODEV;
15034
15035 rc = bnxt_alloc_crash_dump_mem(bp);
15036 if (rc)
15037 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
15038 rc);
15039 if (!rc) {
15040 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
15041 if (rc) {
15042 bnxt_free_crash_dump_mem(bp);
15043 netdev_warn(bp->dev,
15044 "hwrm crash dump mem failure rc: %d\n", rc);
15045 }
15046 }
15047
15048 if (bnxt_fw_pre_resv_vnics(bp))
15049 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
15050
15051 bnxt_hwrm_pfcwd_qcaps(bp);
15052 bnxt_hwrm_func_qcfg(bp);
15053 bnxt_hwrm_vnic_qcaps(bp);
15054 bnxt_hwrm_port_led_qcaps(bp);
15055 bnxt_ethtool_init(bp);
15056 if (bp->fw_cap & BNXT_FW_CAP_PTP)
15057 __bnxt_hwrm_ptp_qcfg(bp);
15058 bnxt_dcb_init(bp);
15059 bnxt_hwmon_init(bp);
15060 return 0;
15061 }
15062
15063 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
15064 {
15065 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
15066 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
15067 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
15068 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
15069 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
15070 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
15071 bp->rss_hash_delta = bp->rss_hash_cfg;
15072 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
15073 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
15074 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
15075 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
15076 }
15077 }
15078
15079 static void bnxt_set_dflt_rfs(struct bnxt *bp)
15080 {
15081 struct net_device *dev = bp->dev;
15082
15083 dev->hw_features &= ~NETIF_F_NTUPLE;
15084 dev->features &= ~NETIF_F_NTUPLE;
15085 bp->flags &= ~BNXT_FLAG_RFS;
15086 if (bnxt_rfs_supported(bp)) {
15087 dev->hw_features |= NETIF_F_NTUPLE;
15088 if (bnxt_rfs_capable(bp, false)) {
15089 bp->flags |= BNXT_FLAG_RFS;
15090 dev->features |= NETIF_F_NTUPLE;
15091 }
15092 }
15093 }
15094
15095 static void bnxt_fw_init_one_p3(struct bnxt *bp)
15096 {
15097 struct pci_dev *pdev = bp->pdev;
15098
15099 bnxt_set_dflt_rss_hash_type(bp);
15100 bnxt_set_dflt_rfs(bp);
15101
15102 bnxt_get_wol_settings(bp);
15103 if (bp->flags & BNXT_FLAG_WOL_CAP)
15104 device_set_wakeup_enable(&pdev->dev, bp->wol);
15105 else
15106 device_set_wakeup_capable(&pdev->dev, false);
15107
15108 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
15109 bnxt_hwrm_coal_params_qcaps(bp);
15110 }
15111
15112 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
15113
15114 int bnxt_fw_init_one(struct bnxt *bp)
15115 {
15116 int rc;
15117
15118 rc = bnxt_fw_init_one_p1(bp);
15119 if (rc) {
15120 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15121 return rc;
15122 }
15123 rc = bnxt_fw_init_one_p2(bp);
15124 if (rc) {
15125 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15126 return rc;
15127 }
15128 rc = bnxt_probe_phy(bp, false);
15129 if (rc)
15130 return rc;
15131 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15132 if (rc)
15133 return rc;
15134
15135 bnxt_fw_init_one_p3(bp);
15136 return 0;
15137 }
15138
15139 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15140 {
15141 struct bnxt_fw_health *fw_health = bp->fw_health;
15142 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15143 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15144 u32 reg_type, reg_off, delay_msecs;
15145
15146 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15147 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15148 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15149 switch (reg_type) {
15150 case BNXT_FW_HEALTH_REG_TYPE_CFG:
15151 pci_write_config_dword(bp->pdev, reg_off, val);
15152 break;
15153 case BNXT_FW_HEALTH_REG_TYPE_GRC:
15154 writel(reg_off & BNXT_GRC_BASE_MASK,
15155 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15156 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15157 fallthrough;
15158 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15159 writel(val, bp->bar0 + reg_off);
15160 break;
15161 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15162 writel(val, bp->bar1 + reg_off);
15163 break;
15164 }
15165 if (delay_msecs) {
15166 pci_read_config_dword(bp->pdev, 0, &val);
15167 msleep(delay_msecs);
15168 }
15169 }
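/* Note on the tail of bnxt_fw_reset_writel(): the pci_read_config_dword()
 * before msleep() is presumably there as a flush, so the preceding posted
 * register write reaches the device before the delay starts counting.
 */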
15170
15171 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15172 {
15173 struct hwrm_func_qcfg_output *resp;
15174 struct hwrm_func_qcfg_input *req;
15175 bool result = true; /* firmware will enforce if unknown */
15176
15177 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15178 return result;
15179
15180 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15181 return result;
15182
15183 req->fid = cpu_to_le16(0xffff);
15184 resp = hwrm_req_hold(bp, req);
15185 if (!hwrm_req_send(bp, req))
15186 result = !!(le16_to_cpu(resp->flags) &
15187 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15188 hwrm_req_drop(bp, req);
15189 return result;
15190 }
15191
15192 static void bnxt_reset_all(struct bnxt *bp)
15193 {
15194 struct bnxt_fw_health *fw_health = bp->fw_health;
15195 int i, rc;
15196
15197 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15198 bnxt_fw_reset_via_optee(bp);
15199 bp->fw_reset_timestamp = jiffies;
15200 return;
15201 }
15202
15203 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15204 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15205 bnxt_fw_reset_writel(bp, i);
15206 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15207 struct hwrm_fw_reset_input *req;
15208
15209 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15210 if (!rc) {
15211 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15212 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15213 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15214 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15215 rc = hwrm_req_send(bp, req);
15216 }
15217 if (rc != -ENODEV)
15218 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15219 }
15220 bp->fw_reset_timestamp = jiffies;
15221 }
15222
15223 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15224 {
15225 return time_after(jiffies, bp->fw_reset_timestamp +
15226 (bp->fw_reset_max_dsecs * HZ / 10));
15227 }
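/* The *_dsecs fields are deciseconds; dsecs * HZ / 10 converts them to
 * jiffies.  Stand-alone restatement of the conversion:
 */
static unsigned long ex_dsecs_to_jiffies(unsigned long dsecs, unsigned long hz)
{
	return dsecs * hz / 10;	/* e.g. 60 dsecs at HZ == 250 -> 1500 jiffies */
}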
15228
15229 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15230 {
15231 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15232 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15233 bnxt_dl_health_fw_status_update(bp, false);
15234 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15235 netif_close(bp->dev);
15236 }
15237
15238 static void bnxt_fw_reset_task(struct work_struct *work)
15239 {
15240 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15241 int rc = 0;
15242
15243 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15244 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15245 return;
15246 }
15247
15248 switch (bp->fw_reset_state) {
15249 case BNXT_FW_RESET_STATE_POLL_VF: {
15250 int n = bnxt_get_registered_vfs(bp);
15251 int tmo;
15252
15253 if (n < 0) {
15254 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15255 n, jiffies_to_msecs(jiffies -
15256 bp->fw_reset_timestamp));
15257 goto fw_reset_abort;
15258 } else if (n > 0) {
15259 if (bnxt_fw_reset_timeout(bp)) {
15260 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15261 bp->fw_reset_state = 0;
15262 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15263 n);
15264 goto ulp_start;
15265 }
15266 bnxt_queue_fw_reset_work(bp, HZ / 10);
15267 return;
15268 }
15269 bp->fw_reset_timestamp = jiffies;
15270 netdev_lock(bp->dev);
15271 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15272 bnxt_fw_reset_abort(bp, rc);
15273 netdev_unlock(bp->dev);
15274 goto ulp_start;
15275 }
15276 bnxt_fw_reset_close(bp);
15277 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15278 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15279 tmo = HZ / 10;
15280 } else {
15281 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15282 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15283 }
15284 netdev_unlock(bp->dev);
15285 bnxt_queue_fw_reset_work(bp, tmo);
15286 return;
15287 }
15288 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15289 u32 val;
15290
15291 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15292 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15293 !bnxt_fw_reset_timeout(bp)) {
15294 bnxt_queue_fw_reset_work(bp, HZ / 5);
15295 return;
15296 }
15297
15298 if (!bp->fw_health->primary) {
15299 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15300
15301 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15302 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15303 return;
15304 }
15305 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15306 }
15307 fallthrough;
15308 case BNXT_FW_RESET_STATE_RESET_FW:
15309 bnxt_reset_all(bp);
15310 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15311 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15312 return;
15313 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15314 bnxt_inv_fw_health_reg(bp);
15315 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15316 !bp->fw_reset_min_dsecs) {
15317 u16 val;
15318
15319 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15320 if (val == 0xffff) {
15321 if (bnxt_fw_reset_timeout(bp)) {
15322 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15323 rc = -ETIMEDOUT;
15324 goto fw_reset_abort;
15325 }
15326 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15327 return;
15328 }
15329 }
15330 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15331 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15332 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15333 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15334 bnxt_dl_remote_reload(bp);
15335 if (pci_enable_device(bp->pdev)) {
15336 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15337 rc = -ENODEV;
15338 goto fw_reset_abort;
15339 }
15340 pci_set_master(bp->pdev);
15341 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15342 fallthrough;
15343 case BNXT_FW_RESET_STATE_POLL_FW:
15344 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15345 rc = bnxt_hwrm_poll(bp);
15346 if (rc) {
15347 if (bnxt_fw_reset_timeout(bp)) {
15348 netdev_err(bp->dev, "Firmware reset aborted\n");
15349 goto fw_reset_abort_status;
15350 }
15351 bnxt_queue_fw_reset_work(bp, HZ / 5);
15352 return;
15353 }
15354 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15355 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15356 fallthrough;
15357 case BNXT_FW_RESET_STATE_OPENING:
15358 while (!netdev_trylock(bp->dev)) {
15359 bnxt_queue_fw_reset_work(bp, HZ / 10);
15360 return;
15361 }
15362 rc = bnxt_open(bp->dev);
15363 if (rc) {
15364 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15365 bnxt_fw_reset_abort(bp, rc);
15366 netdev_unlock(bp->dev);
15367 goto ulp_start;
15368 }
15369
15370 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15371 bp->fw_health->enabled) {
15372 bp->fw_health->last_fw_reset_cnt =
15373 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15374 }
15375 bp->fw_reset_state = 0;
15376 /* Make sure fw_reset_state is 0 before clearing the flag */
15377 smp_mb__before_atomic();
15378 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15379 bnxt_ptp_reapply_pps(bp);
15380 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15381 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15382 bnxt_dl_health_fw_recovery_done(bp);
15383 bnxt_dl_health_fw_status_update(bp, true);
15384 }
15385 netdev_unlock(bp->dev);
15386 bnxt_ulp_start(bp);
15387 bnxt_reenable_sriov(bp);
15388 netdev_lock(bp->dev);
15389 bnxt_vf_reps_alloc(bp);
15390 bnxt_vf_reps_open(bp);
15391 netdev_unlock(bp->dev);
15392 break;
15393 }
15394 return;
15395
15396 fw_reset_abort_status:
15397 if (bp->fw_health->status_reliable ||
15398 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15399 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15400
15401 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15402 }
15403 fw_reset_abort:
15404 netdev_lock(bp->dev);
15405 bnxt_fw_reset_abort(bp, rc);
15406 netdev_unlock(bp->dev);
15407 ulp_start:
15408 if (!rc)
15409 bnxt_ulp_start(bp);
15410 }
15411
15412 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15413 {
15414 int rc;
15415 struct bnxt *bp = netdev_priv(dev);
15416
15417 SET_NETDEV_DEV(dev, &pdev->dev);
15418
15419 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15420 rc = pci_enable_device(pdev);
15421 if (rc) {
15422 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15423 goto init_err;
15424 }
15425
15426 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15427 dev_err(&pdev->dev,
15428 "Cannot find PCI device base address, aborting\n");
15429 rc = -ENODEV;
15430 goto init_err_disable;
15431 }
15432
15433 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15434 if (rc) {
15435 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15436 goto init_err_disable;
15437 }
15438
15439 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15440 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15441 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15442 rc = -EIO;
15443 goto init_err_release;
15444 }
15445
15446 pci_set_master(pdev);
15447
15448 bp->dev = dev;
15449 bp->pdev = pdev;
15450
15451 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15452 * determines the BAR size.
15453 */
15454 bp->bar0 = pci_ioremap_bar(pdev, 0);
15455 if (!bp->bar0) {
15456 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15457 rc = -ENOMEM;
15458 goto init_err_release;
15459 }
15460
15461 bp->bar2 = pci_ioremap_bar(pdev, 4);
15462 if (!bp->bar2) {
15463 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15464 rc = -ENOMEM;
15465 goto init_err_release;
15466 }
15467
15468 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15469 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15470
15471 spin_lock_init(&bp->ntp_fltr_lock);
15472 #if BITS_PER_LONG == 32
15473 spin_lock_init(&bp->db_lock);
15474 #endif
15475
15476 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15477 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15478
15479 timer_setup(&bp->timer, bnxt_timer, 0);
15480 bp->current_interval = BNXT_TIMER_INTERVAL;
15481
15482 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15483 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15484
15485 clear_bit(BNXT_STATE_OPEN, &bp->state);
15486 return 0;
15487
15488 init_err_release:
15489 bnxt_unmap_bars(bp, pdev);
15490 pci_release_regions(pdev);
15491
15492 init_err_disable:
15493 pci_disable_device(pdev);
15494
15495 init_err:
15496 return rc;
15497 }
15498
15499 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15500 {
15501 struct sockaddr *addr = p;
15502 struct bnxt *bp = netdev_priv(dev);
15503 int rc = 0;
15504
15505 netdev_assert_locked(dev);
15506
15507 if (!is_valid_ether_addr(addr->sa_data))
15508 return -EADDRNOTAVAIL;
15509
15510 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15511 return 0;
15512
15513 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15514 if (rc)
15515 return rc;
15516
15517 eth_hw_addr_set(dev, addr->sa_data);
15518 bnxt_clear_usr_fltrs(bp, true);
15519 if (netif_running(dev)) {
15520 bnxt_close_nic(bp, false, false);
15521 rc = bnxt_open_nic(bp, false, false);
15522 }
15523
15524 return rc;
15525 }
15526
15527 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15528 {
15529 struct bnxt *bp = netdev_priv(dev);
15530
15531 netdev_assert_locked(dev);
15532
15533 if (netif_running(dev))
15534 bnxt_close_nic(bp, true, false);
15535
15536 WRITE_ONCE(dev->mtu, new_mtu);
15537
15538 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15539 * program is attached. We need to set the AGG rings settings and
15540 * rx_skb_func accordingly.
15541 */
15542 if (READ_ONCE(bp->xdp_prog))
15543 bnxt_set_rx_skb_mode(bp, true);
15544
15545 bnxt_set_ring_params(bp);
15546
15547 if (netif_running(dev))
15548 return bnxt_open_nic(bp, true, false);
15549
15550 return 0;
15551 }
15552
15553 void bnxt_set_cp_rings(struct bnxt *bp, bool sh)
15554 {
15555 int tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15556
15557 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15558 tx_cp + bp->rx_nr_rings;
15559 }
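/* Example: with 4 TX completion rings and 4 RX rings, shared mode needs
 * max(4, 4) = 4 completion rings, while non-shared mode needs 4 + 4 = 8.
 */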
15560
15561 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15562 {
15563 struct bnxt *bp = netdev_priv(dev);
15564 bool sh = false;
15565 int rc;
15566
15567 if (tc > bp->max_tc) {
15568 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15569 tc, bp->max_tc);
15570 return -EINVAL;
15571 }
15572
15573 if (bp->num_tc == tc)
15574 return 0;
15575
15576 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15577 sh = true;
15578
15579 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15580 sh, tc, bp->tx_nr_rings_xdp);
15581 if (rc)
15582 return rc;
15583
15584 /* Needs to close the device and do hw resource re-allocations */
15585 if (netif_running(bp->dev))
15586 bnxt_close_nic(bp, true, false);
15587
15588 if (tc) {
15589 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15590 netdev_set_num_tc(dev, tc);
15591 bp->num_tc = tc;
15592 } else {
15593 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15594 netdev_reset_tc(dev);
15595 bp->num_tc = 0;
15596 }
15597 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15598 bnxt_set_cp_rings(bp, sh);
15599
15600 if (netif_running(bp->dev))
15601 return bnxt_open_nic(bp, true, false);
15602
15603 return 0;
15604 }
15605
15606 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15607 void *cb_priv)
15608 {
15609 struct bnxt *bp = cb_priv;
15610
15611 if (!bnxt_tc_flower_enabled(bp) ||
15612 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15613 return -EOPNOTSUPP;
15614
15615 switch (type) {
15616 case TC_SETUP_CLSFLOWER:
15617 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15618 default:
15619 return -EOPNOTSUPP;
15620 }
15621 }
15622
15623 LIST_HEAD(bnxt_block_cb_list);
15624
15625 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15626 void *type_data)
15627 {
15628 struct bnxt *bp = netdev_priv(dev);
15629
15630 switch (type) {
15631 case TC_SETUP_BLOCK:
15632 return flow_block_cb_setup_simple(type_data,
15633 &bnxt_block_cb_list,
15634 bnxt_setup_tc_block_cb,
15635 bp, bp, true);
15636 case TC_SETUP_QDISC_MQPRIO: {
15637 struct tc_mqprio_qopt *mqprio = type_data;
15638
15639 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15640
15641 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15642 }
15643 default:
15644 return -EOPNOTSUPP;
15645 }
15646 }
15647
15648 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15649 const struct sk_buff *skb)
15650 {
15651 struct bnxt_vnic_info *vnic;
15652
15653 if (skb)
15654 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15655
15656 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15657 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15658 }
15659
15660 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15661 u32 idx)
15662 {
15663 struct hlist_head *head;
15664 int bit_id;
15665
15666 spin_lock_bh(&bp->ntp_fltr_lock);
15667 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15668 if (bit_id < 0) {
15669 spin_unlock_bh(&bp->ntp_fltr_lock);
15670 return -ENOMEM;
15671 }
15672
15673 fltr->base.sw_id = (u16)bit_id;
15674 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15675 fltr->base.flags |= BNXT_ACT_RING_DST;
15676 head = &bp->ntp_fltr_hash_tbl[idx];
15677 hlist_add_head_rcu(&fltr->base.hash, head);
15678 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15679 bnxt_insert_usr_fltr(bp, &fltr->base);
15680 bp->ntp_fltr_count++;
15681 spin_unlock_bh(&bp->ntp_fltr_lock);
15682 return 0;
15683 }
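/* ID allocation above: bitmap_find_free_region(bmap, bp->max_fltr, 0)
 * claims a region of 2^0 = 1 bit and returns its index, which becomes the
 * filter's software ID; bnxt_del_ntp_filter() releases it later with
 * clear_bit().  Doing all of it under ntp_fltr_lock keeps the bitmap, the
 * hash table and ntp_fltr_count consistent with each other.
 */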
15684
15685 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15686 struct bnxt_ntuple_filter *f2)
15687 {
15688 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15689 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15690 struct flow_keys *keys1 = &f1->fkeys;
15691 struct flow_keys *keys2 = &f2->fkeys;
15692
15693 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15694 keys1->basic.ip_proto != keys2->basic.ip_proto)
15695 return false;
15696
15697 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15698 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15699 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15700 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15701 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15702 return false;
15703 } else {
15704 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15705 &keys2->addrs.v6addrs.src) ||
15706 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15707 &masks2->addrs.v6addrs.src) ||
15708 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15709 &keys2->addrs.v6addrs.dst) ||
15710 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15711 &masks2->addrs.v6addrs.dst))
15712 return false;
15713 }
15714
15715 return keys1->ports.src == keys2->ports.src &&
15716 masks1->ports.src == masks2->ports.src &&
15717 keys1->ports.dst == keys2->ports.dst &&
15718 masks1->ports.dst == masks2->ports.dst &&
15719 keys1->control.flags == keys2->control.flags &&
15720 f1->l2_fltr == f2->l2_fltr;
15721 }
15722
15723 struct bnxt_ntuple_filter *
15724 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15725 struct bnxt_ntuple_filter *fltr, u32 idx)
15726 {
15727 struct bnxt_ntuple_filter *f;
15728 struct hlist_head *head;
15729
15730 head = &bp->ntp_fltr_hash_tbl[idx];
15731 hlist_for_each_entry_rcu(f, head, base.hash) {
15732 if (bnxt_fltr_match(f, fltr))
15733 return f;
15734 }
15735 return NULL;
15736 }
15737
15738 #ifdef CONFIG_RFS_ACCEL
15739 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15740 u16 rxq_index, u32 flow_id)
15741 {
15742 struct bnxt *bp = netdev_priv(dev);
15743 struct bnxt_ntuple_filter *fltr, *new_fltr;
15744 struct flow_keys *fkeys;
15745 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15746 struct bnxt_l2_filter *l2_fltr;
15747 int rc = 0, idx;
15748 u32 flags;
15749
15750 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15751 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15752 atomic_inc(&l2_fltr->refcnt);
15753 } else {
15754 struct bnxt_l2_key key;
15755
15756 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15757 key.vlan = 0;
15758 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15759 if (!l2_fltr)
15760 return -EINVAL;
15761 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15762 bnxt_del_l2_filter(bp, l2_fltr);
15763 return -EINVAL;
15764 }
15765 }
15766 new_fltr = kzalloc_obj(*new_fltr, GFP_ATOMIC);
15767 if (!new_fltr) {
15768 bnxt_del_l2_filter(bp, l2_fltr);
15769 return -ENOMEM;
15770 }
15771
15772 fkeys = &new_fltr->fkeys;
15773 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15774 rc = -EPROTONOSUPPORT;
15775 goto err_free;
15776 }
15777
15778 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15779 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15780 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15781 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15782 rc = -EPROTONOSUPPORT;
15783 goto err_free;
15784 }
15785 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15786 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15787 if (bp->hwrm_spec_code < 0x10601) {
15788 rc = -EPROTONOSUPPORT;
15789 goto err_free;
15790 }
15791 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15792 }
15793 flags = fkeys->control.flags;
15794 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15795 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15796 rc = -EPROTONOSUPPORT;
15797 goto err_free;
15798 }
15799 new_fltr->l2_fltr = l2_fltr;
15800
15801 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15802 rcu_read_lock();
15803 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15804 if (fltr) {
15805 rc = fltr->base.sw_id;
15806 rcu_read_unlock();
15807 goto err_free;
15808 }
15809 rcu_read_unlock();
15810
15811 new_fltr->flow_id = flow_id;
15812 new_fltr->base.rxq = rxq_index;
15813 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15814 if (!rc) {
15815 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15816 return new_fltr->base.sw_id;
15817 }
15818
15819 err_free:
15820 bnxt_del_l2_filter(bp, l2_fltr);
15821 kfree(new_fltr);
15822 return rc;
15823 }
15824 #endif
15825
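/* Remove an n-tuple filter from the hash table and the user filter list,
 * drop the reference on its L2 filter, release its sw_id bit and free the
 * filter via RCU.  The BNXT_FLTR_INSERTED bit makes this safe against
 * concurrent or repeated deletion.
 */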
15826 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15827 {
15828 spin_lock_bh(&bp->ntp_fltr_lock);
15829 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15830 spin_unlock_bh(&bp->ntp_fltr_lock);
15831 return;
15832 }
15833 hlist_del_rcu(&fltr->base.hash);
15834 bnxt_del_one_usr_fltr(bp, &fltr->base);
15835 bp->ntp_fltr_count--;
15836 spin_unlock_bh(&bp->ntp_fltr_lock);
15837 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15838 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15839 kfree_rcu(fltr, base.rcu);
15840 }
15841
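/* aRFS housekeeping run from the sp_task: program n-tuple filters that are
 * not yet set up in hardware, and delete filters that RFS reports as
 * expired.  Filters flagged BNXT_ACT_NO_AGING are never aged out.
 */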
15842 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15843 {
15844 #ifdef CONFIG_RFS_ACCEL
15845 int i;
15846
15847 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15848 struct hlist_head *head;
15849 struct hlist_node *tmp;
15850 struct bnxt_ntuple_filter *fltr;
15851 int rc;
15852
15853 head = &bp->ntp_fltr_hash_tbl[i];
15854 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15855 bool del = false;
15856
15857 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15858 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15859 continue;
15860 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15861 fltr->flow_id,
15862 fltr->base.sw_id)) {
15863 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15864 fltr);
15865 del = true;
15866 }
15867 } else {
15868 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15869 fltr);
15870 if (rc)
15871 del = true;
15872 else
15873 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15874 }
15875
15876 if (del)
15877 bnxt_del_ntp_filter(bp, fltr);
15878 }
15879 }
15880 #endif
15881 }
15882
15883 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15884 unsigned int entry, struct udp_tunnel_info *ti)
15885 {
15886 struct bnxt *bp = netdev_priv(netdev);
15887 unsigned int cmd;
15888
15889 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15890 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15891 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15892 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15893 else
15894 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15895
15896 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15897 }
15898
15899 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15900 unsigned int entry, struct udp_tunnel_info *ti)
15901 {
15902 struct bnxt *bp = netdev_priv(netdev);
15903 unsigned int cmd;
15904
15905 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15906 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15907 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15908 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15909 else
15910 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15911
15912 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15913 }
15914
15915 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15916 .set_port = bnxt_udp_tunnel_set_port,
15917 .unset_port = bnxt_udp_tunnel_unset_port,
15918 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15919 .tables = {
15920 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15921 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15922 },
15923 }, bnxt_udp_tunnels_p7 = {
15924 .set_port = bnxt_udp_tunnel_set_port,
15925 .unset_port = bnxt_udp_tunnel_unset_port,
15926 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15927 .tables = {
15928 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15929 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15930 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15931 },
15932 };
15933
15934 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15935 struct net_device *dev, u32 filter_mask,
15936 int nlflags)
15937 {
15938 struct bnxt *bp = netdev_priv(dev);
15939
15940 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15941 nlflags, filter_mask, NULL);
15942 }
15943
15944 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15945 u16 flags, struct netlink_ext_ack *extack)
15946 {
15947 struct bnxt *bp = netdev_priv(dev);
15948 struct nlattr *attr, *br_spec;
15949 int rem, rc = 0;
15950
15951 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15952 return -EOPNOTSUPP;
15953
15954 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15955 if (!br_spec)
15956 return -EINVAL;
15957
15958 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15959 u16 mode;
15960
15961 mode = nla_get_u16(attr);
15962 if (mode == bp->br_mode)
15963 break;
15964
15965 rc = bnxt_hwrm_set_br_mode(bp, mode);
15966 if (!rc)
15967 bp->br_mode = mode;
15968 break;
15969 }
15970 return rc;
15971 }
15972
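/* Report the adapter DSN as the physical port parent ID so that the PF and
 * its VF representors share a common switch ID.  Only valid in switchdev
 * mode on a PF with a valid DSN.
 */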
15973 int bnxt_get_port_parent_id(struct net_device *dev,
15974 struct netdev_phys_item_id *ppid)
15975 {
15976 struct bnxt *bp = netdev_priv(dev);
15977
15978 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15979 return -EOPNOTSUPP;
15980
15981 /* The PF and its VF-reps only support the switchdev framework */
15982 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15983 return -EOPNOTSUPP;
15984
15985 ppid->id_len = sizeof(bp->dsn);
15986 memcpy(ppid->id, bp->dsn, ppid->id_len);
15987
15988 return 0;
15989 }
15990
15991 static const struct net_device_ops bnxt_netdev_ops = {
15992 .ndo_open = bnxt_open,
15993 .ndo_start_xmit = bnxt_start_xmit,
15994 .ndo_stop = bnxt_close,
15995 .ndo_get_stats64 = bnxt_get_stats64,
15996 .ndo_set_rx_mode_async = bnxt_set_rx_mode,
15997 .ndo_eth_ioctl = bnxt_ioctl,
15998 .ndo_validate_addr = eth_validate_addr,
15999 .ndo_set_mac_address = bnxt_change_mac_addr,
16000 .ndo_change_mtu = bnxt_change_mtu,
16001 .ndo_fix_features = bnxt_fix_features,
16002 .ndo_set_features = bnxt_set_features,
16003 .ndo_features_check = bnxt_features_check,
16004 .ndo_tx_timeout = bnxt_tx_timeout,
16005 #ifdef CONFIG_BNXT_SRIOV
16006 .ndo_get_vf_config = bnxt_get_vf_config,
16007 .ndo_set_vf_mac = bnxt_set_vf_mac,
16008 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
16009 .ndo_set_vf_rate = bnxt_set_vf_bw,
16010 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
16011 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
16012 .ndo_set_vf_trust = bnxt_set_vf_trust,
16013 #endif
16014 .ndo_setup_tc = bnxt_setup_tc,
16015 #ifdef CONFIG_RFS_ACCEL
16016 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
16017 #endif
16018 .ndo_bpf = bnxt_xdp,
16019 .ndo_xdp_xmit = bnxt_xdp_xmit,
16020 .ndo_bridge_getlink = bnxt_bridge_getlink,
16021 .ndo_bridge_setlink = bnxt_bridge_setlink,
16022 .ndo_hwtstamp_get = bnxt_hwtstamp_get,
16023 .ndo_hwtstamp_set = bnxt_hwtstamp_set,
16024 };
16025
16026 static const struct xdp_metadata_ops bnxt_xdp_metadata_ops = {
16027 .xmo_rx_hash = bnxt_xdp_rx_hash,
16028 };
16029
16030 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
16031 struct netdev_queue_stats_rx *stats)
16032 {
16033 struct bnxt *bp = netdev_priv(dev);
16034 struct bnxt_cp_ring_info *cpr;
16035 u64 *sw;
16036
16037 if (!bp->bnapi)
16038 return;
16039
16040 cpr = &bp->bnapi[i]->cp_ring;
16041 sw = cpr->stats.sw_stats;
16042
16043 stats->packets = 0;
16044 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
16045 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
16046 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
16047
16048 stats->bytes = 0;
16049 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
16050 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
16051 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
16052
16053 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
16054 stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
16055 stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
16056 }
16057
16058 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
16059 struct netdev_queue_stats_tx *stats)
16060 {
16061 struct bnxt *bp = netdev_priv(dev);
16062 struct bnxt_napi *bnapi;
16063 u64 *sw;
16064
16065 if (!bp->tx_ring)
16066 return;
16067
16068 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
16069 sw = bnapi->cp_ring.stats.sw_stats;
16070
16071 stats->packets = 0;
16072 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
16073 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
16074 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
16075
16076 stats->bytes = 0;
16077 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
16078 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
16079 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
16080 }
16081
16082 static void bnxt_get_base_stats(struct net_device *dev,
16083 struct netdev_queue_stats_rx *rx,
16084 struct netdev_queue_stats_tx *tx)
16085 {
16086 struct bnxt *bp = netdev_priv(dev);
16087
16088 rx->packets = bp->net_stats_prev.rx_packets;
16089 rx->bytes = bp->net_stats_prev.rx_bytes;
16090 rx->alloc_fail = bp->ring_drv_stats_prev.rx_total_oom_discards;
16091 rx->hw_gro_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_packets;
16092 rx->hw_gro_wire_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_wire_packets;
16093
16094 tx->packets = bp->net_stats_prev.tx_packets;
16095 tx->bytes = bp->net_stats_prev.tx_bytes;
16096 }
16097
16098 static const struct netdev_stat_ops bnxt_stat_ops = {
16099 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
16100 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
16101 .get_base_stats = bnxt_get_base_stats,
16102 };
16103
16104 static void bnxt_queue_default_qcfg(struct net_device *dev,
16105 struct netdev_queue_config *qcfg)
16106 {
16107 qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
16108 }
16109
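/* Validate a per-queue configuration from the queue management API:
 * rx_page_size must be a power of two between BNXT_RX_PAGE_SIZE and
 * BNXT_MAX_RX_PAGE_SIZE, and only P5+ chips support a non-default value.
 */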
16110 static int bnxt_validate_qcfg(struct net_device *dev,
16111 struct netdev_queue_config *qcfg,
16112 struct netlink_ext_ack *extack)
16113 {
16114 struct bnxt *bp = netdev_priv(dev);
16115
16116 /* Older chips need MSS calc so rx_page_size is not supported */
16117 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
16118 qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
16119 return -EINVAL;
16120
16121 if (!is_power_of_2(qcfg->rx_page_size))
16122 return -ERANGE;
16123
16124 if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
16125 qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
16126 return -ERANGE;
16127
16128 return 0;
16129 }
16130
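/* Queue management API: allocate the software state and ring memory needed
 * to restart RX queue @idx into the caller-provided @qmem area.  Everything
 * is built on a clone of the live ring so the old queue keeps working until
 * bnxt_queue_start() swaps the clone in.
 */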
16131 static int bnxt_queue_mem_alloc(struct net_device *dev,
16132 struct netdev_queue_config *qcfg,
16133 void *qmem, int idx)
16134 {
16135 struct bnxt_rx_ring_info *rxr, *clone;
16136 struct bnxt *bp = netdev_priv(dev);
16137 struct bnxt_ring_struct *ring;
16138 int rc;
16139
16140 if (!bp->rx_ring)
16141 return -ENETDOWN;
16142
16143 rxr = &bp->rx_ring[idx];
16144 clone = qmem;
16145 memcpy(clone, rxr, sizeof(*rxr));
16146 bnxt_init_rx_ring_struct(bp, clone);
16147 bnxt_reset_rx_ring_struct(bp, clone);
16148
16149 clone->rx_prod = 0;
16150 clone->rx_agg_prod = 0;
16151 clone->rx_sw_agg_prod = 0;
16152 clone->rx_next_cons = 0;
16153 clone->need_head_pool = false;
16154 clone->rx_page_size = qcfg->rx_page_size;
16155
16156 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16157 if (rc)
16158 return rc;
16159
16160 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16161 if (rc < 0)
16162 goto err_page_pool_destroy;
16163
16164 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16165 MEM_TYPE_PAGE_POOL,
16166 clone->page_pool);
16167 if (rc)
16168 goto err_rxq_info_unreg;
16169
16170 ring = &clone->rx_ring_struct;
16171 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16172 if (rc)
16173 goto err_free_rx_ring;
16174
16175 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16176 ring = &clone->rx_agg_ring_struct;
16177 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16178 if (rc)
16179 goto err_free_rx_agg_ring;
16180
16181 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16182 if (rc)
16183 goto err_free_rx_agg_ring;
16184 }
16185
16186 if (bp->flags & BNXT_FLAG_TPA) {
16187 rc = bnxt_alloc_one_tpa_info(bp, clone);
16188 if (rc)
16189 goto err_free_tpa_info;
16190 }
16191
16192 bnxt_init_one_rx_ring_rxbd(bp, clone);
16193 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16194
16195 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16196 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16197 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16198 if (bp->flags & BNXT_FLAG_TPA)
16199 bnxt_alloc_one_tpa_info_data(bp, clone);
16200
16201 return 0;
16202
16203 err_free_tpa_info:
16204 bnxt_free_one_tpa_info(bp, clone);
16205 err_free_rx_agg_ring:
16206 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16207 err_free_rx_ring:
16208 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16209 err_rxq_info_unreg:
16210 xdp_rxq_info_unreg(&clone->xdp_rxq);
16211 err_page_pool_destroy:
16212 page_pool_destroy(clone->page_pool);
16213 page_pool_destroy(clone->head_pool);
16214 clone->page_pool = NULL;
16215 clone->head_pool = NULL;
16216 return rc;
16217 }
16218
16219 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16220 {
16221 struct bnxt_rx_ring_info *rxr = qmem;
16222 struct bnxt *bp = netdev_priv(dev);
16223 struct bnxt_ring_struct *ring;
16224
16225 bnxt_free_one_rx_ring_skbs(bp, rxr);
16226 bnxt_free_one_tpa_info(bp, rxr);
16227
16228 xdp_rxq_info_unreg(&rxr->xdp_rxq);
16229
16230 page_pool_destroy(rxr->page_pool);
16231 page_pool_destroy(rxr->head_pool);
16232 rxr->page_pool = NULL;
16233 rxr->head_pool = NULL;
16234
16235 ring = &rxr->rx_ring_struct;
16236 bnxt_free_ring(bp, &ring->ring_mem);
16237
16238 ring = &rxr->rx_agg_ring_struct;
16239 bnxt_free_ring(bp, &ring->ring_mem);
16240
16241 kfree(rxr->rx_agg_bmap);
16242 rxr->rx_agg_bmap = NULL;
16243 }
16244
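/* Copy the ring memory pointers (page table, vmem, per-page arrays) and the
 * aggregation bitmap from @src to @dst.  Both rings must have identical
 * geometry; the WARN_ONs below check for any mismatch.
 */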
16245 static void bnxt_copy_rx_ring(struct bnxt *bp,
16246 struct bnxt_rx_ring_info *dst,
16247 struct bnxt_rx_ring_info *src)
16248 {
16249 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16250 struct bnxt_ring_struct *dst_ring, *src_ring;
16251 int i;
16252
16253 dst_ring = &dst->rx_ring_struct;
16254 dst_rmem = &dst_ring->ring_mem;
16255 src_ring = &src->rx_ring_struct;
16256 src_rmem = &src_ring->ring_mem;
16257
16258 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16259 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16260 WARN_ON(dst_rmem->flags != src_rmem->flags);
16261 WARN_ON(dst_rmem->depth != src_rmem->depth);
16262 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16263 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16264
16265 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16266 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16267 *dst_rmem->vmem = *src_rmem->vmem;
16268 for (i = 0; i < dst_rmem->nr_pages; i++) {
16269 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16270 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16271 }
16272
16273 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16274 return;
16275
16276 dst_ring = &dst->rx_agg_ring_struct;
16277 dst_rmem = &dst_ring->ring_mem;
16278 src_ring = &src->rx_agg_ring_struct;
16279 src_rmem = &src_ring->ring_mem;
16280
16281 dst->rx_page_size = src->rx_page_size;
16282
16283 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16284 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16285 WARN_ON(dst_rmem->flags != src_rmem->flags);
16286 WARN_ON(dst_rmem->depth != src_rmem->depth);
16287 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16288 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16289 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16290
16291 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16292 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16293 *dst_rmem->vmem = *src_rmem->vmem;
16294 for (i = 0; i < dst_rmem->nr_pages; i++) {
16295 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16296 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16297 }
16298
16299 dst->rx_agg_bmap = src->rx_agg_bmap;
16300 }
16301
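/* Queue management API: bring up RX queue @idx using the rings and page
 * pools prepared in @qmem.  The hardware rings are reallocated via HWRM,
 * the producer doorbells are written, NAPI is re-enabled and the VNIC MRUs
 * are restored.  An unexpected HWRM failure schedules a full device reset.
 */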
16302 static int bnxt_queue_start(struct net_device *dev,
16303 struct netdev_queue_config *qcfg,
16304 void *qmem, int idx)
16305 {
16306 struct bnxt *bp = netdev_priv(dev);
16307 struct bnxt_rx_ring_info *rxr, *clone;
16308 struct bnxt_cp_ring_info *cpr;
16309 struct bnxt_vnic_info *vnic;
16310 struct bnxt_napi *bnapi;
16311 int i, rc;
16312 u16 mru;
16313
16314 rxr = &bp->rx_ring[idx];
16315 clone = qmem;
16316
16317 rxr->rx_prod = clone->rx_prod;
16318 rxr->rx_agg_prod = clone->rx_agg_prod;
16319 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16320 rxr->rx_next_cons = clone->rx_next_cons;
16321 rxr->rx_tpa = clone->rx_tpa;
16322 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16323 rxr->page_pool = clone->page_pool;
16324 rxr->head_pool = clone->head_pool;
16325 rxr->xdp_rxq = clone->xdp_rxq;
16326 rxr->need_head_pool = clone->need_head_pool;
16327
16328 bnxt_copy_rx_ring(bp, rxr, clone);
16329
16330 bnapi = rxr->bnapi;
16331 cpr = &bnapi->cp_ring;
16332
16333 /* All rings have been reserved and previously allocated.
16334 * Reallocating with the same parameters should never fail.
16335 */
16336 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16337 if (rc)
16338 goto err_reset;
16339
16340 if (bp->tph_mode) {
16341 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16342 if (rc)
16343 goto err_reset;
16344 }
16345
16346 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16347 if (rc)
16348 goto err_reset;
16349
16350 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16351 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16352 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16353
16354 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16355 rc = bnxt_tx_queue_start(bp, idx);
16356 if (rc)
16357 goto err_reset;
16358 }
16359
16360 bnxt_enable_rx_page_pool(rxr);
16361 napi_enable_locked(&bnapi->napi);
16362 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16363
16364 mru = bp->dev->mtu + VLAN_ETH_HLEN;
16365 for (i = 0; i < bp->nr_vnics; i++) {
16366 vnic = &bp->vnic_info[i];
16367
16368 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16369 if (rc)
16370 return rc;
16371 }
16372 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16373
16374 err_reset:
16375 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16376 rc);
16377 napi_enable_locked(&bnapi->napi);
16378 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16379 bnxt_reset_task(bp, true);
16380 return rc;
16381 }
16382
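/* Queue management API: quiesce RX queue @idx.  The VNIC MRUs are cleared
 * so no more packets are steered to the queue, the hardware rings are freed
 * via HWRM, page pool direct recycling is disabled, and NAPI is disabled
 * only after the ring free completions have been processed.  The live ring
 * state is saved into @qmem for a later restart.
 */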
16383 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16384 {
16385 struct bnxt *bp = netdev_priv(dev);
16386 struct bnxt_rx_ring_info *rxr;
16387 struct bnxt_cp_ring_info *cpr;
16388 struct bnxt_vnic_info *vnic;
16389 struct bnxt_napi *bnapi;
16390 int i;
16391
16392 for (i = 0; i < bp->nr_vnics; i++) {
16393 vnic = &bp->vnic_info[i];
16394
16395 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16396 }
16397 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16398 /* Make sure NAPI sees that the VNIC is disabled */
16399 synchronize_net();
16400 rxr = &bp->rx_ring[idx];
16401 bnapi = rxr->bnapi;
16402 cpr = &bnapi->cp_ring;
16403 cancel_work_sync(&cpr->dim.work);
16404 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16405 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16406 page_pool_disable_direct_recycling(rxr->page_pool);
16407 if (bnxt_separate_head_pool(rxr))
16408 page_pool_disable_direct_recycling(rxr->head_pool);
16409
16410 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16411 bnxt_tx_queue_stop(bp, idx);
16412
16413 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16414 * completion is handled in NAPI to guarantee no more DMA on that ring
16415 * after seeing the completion.
16416 */
16417 napi_disable_locked(&bnapi->napi);
16418
16419 if (bp->tph_mode) {
16420 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16421 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16422 }
16423 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16424
16425 memcpy(qmem, rxr, sizeof(*rxr));
16426 bnxt_init_rx_ring_struct(bp, qmem);
16427
16428 return 0;
16429 }
16430
16431 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16432 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16433 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16434 .ndo_queue_mem_free = bnxt_queue_mem_free,
16435 .ndo_queue_start = bnxt_queue_start,
16436 .ndo_queue_stop = bnxt_queue_stop,
16437 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16438 .ndo_validate_qcfg = bnxt_validate_qcfg,
16439 .supported_params = QCFG_RX_PAGE_SIZE,
16440 };
16441
16442 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16443 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16444 };
16445
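/* PCI remove handler.  Tear down in roughly the reverse order of probe:
 * disable SR-IOV, remove the auxiliary devices, unregister the netdev,
 * cancel deferred work, unregister from devlink and firmware, and free all
 * remaining driver resources.
 */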
16446 static void bnxt_remove_one(struct pci_dev *pdev)
16447 {
16448 struct net_device *dev = pci_get_drvdata(pdev);
16449 struct bnxt *bp = netdev_priv(dev);
16450
16451 if (BNXT_PF(bp))
16452 __bnxt_sriov_disable(bp);
16453
16454 bnxt_aux_devices_del(bp);
16455
16456 unregister_netdev(dev);
16457 bnxt_ptp_clear(bp);
16458
16459 bnxt_aux_devices_uninit(bp);
16460 bnxt_auxdev_id_free(bp, bp->auxdev_id);
16461
16462 bnxt_free_l2_filters(bp, true);
16463 bnxt_free_ntp_fltrs(bp, true);
16464 WARN_ON(bp->num_rss_ctx);
16465 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16466 /* Flush any pending tasks */
16467 cancel_work_sync(&bp->sp_task);
16468 cancel_delayed_work_sync(&bp->fw_reset_task);
16469 bp->sp_event = 0;
16470
16471 bnxt_dl_fw_reporters_destroy(bp);
16472 bnxt_dl_unregister(bp);
16473 bnxt_shutdown_tc(bp);
16474
16475 bnxt_clear_int_mode(bp);
16476 bnxt_hwrm_func_drv_unrgtr(bp);
16477 bnxt_free_hwrm_resources(bp);
16478 bnxt_hwmon_uninit(bp);
16479 bnxt_ethtool_free(bp);
16480 bnxt_dcb_free(bp);
16481 kfree(bp->ptp_cfg);
16482 bp->ptp_cfg = NULL;
16483 kfree(bp->fw_health);
16484 bp->fw_health = NULL;
16485 bnxt_cleanup_pci(bp);
16486 bnxt_free_ctx_mem(bp, true);
16487 bnxt_free_crash_dump_mem(bp);
16488 kfree(bp->rss_indir_tbl);
16489 bp->rss_indir_tbl = NULL;
16490 bnxt_free_port_stats(bp);
16491 free_netdev(dev);
16492 }
16493
16494 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16495 {
16496 int rc = 0;
16497 struct bnxt_link_info *link_info = &bp->link_info;
16498
16499 bp->phy_flags = 0;
16500 rc = bnxt_hwrm_phy_qcaps(bp);
16501 if (rc) {
16502 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16503 rc);
16504 return rc;
16505 }
16506 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16507 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16508 else
16509 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16510
16511 bp->mac_flags = 0;
16512 bnxt_hwrm_mac_qcaps(bp);
16513
16514 if (!fw_dflt)
16515 return 0;
16516
16517 mutex_lock(&bp->link_lock);
16518 rc = bnxt_update_link(bp, false);
16519 if (rc) {
16520 mutex_unlock(&bp->link_lock);
16521 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16522 rc);
16523 return rc;
16524 }
16525
16526 /* Older firmware does not have supported_auto_speeds, so assume
16527 * that all supported speeds can be autonegotiated.
16528 */
16529 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16530 link_info->support_auto_speeds = link_info->support_speeds;
16531
16532 bnxt_init_ethtool_link_settings(bp);
16533 mutex_unlock(&bp->link_lock);
16534 return 0;
16535 }
16536
16537 static int bnxt_get_max_irq(struct pci_dev *pdev)
16538 {
16539 u16 ctrl;
16540
16541 if (!pdev->msix_cap)
16542 return 1;
16543
16544 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16545 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16546 }
16547
16548 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16549 int *max_cp)
16550 {
16551 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16552 int max_ring_grps = 0, max_irq;
16553
16554 *max_tx = hw_resc->max_tx_rings;
16555 *max_rx = hw_resc->max_rx_rings;
16556 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16557 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16558 bnxt_get_ulp_msix_num_in_use(bp),
16559 hw_resc->max_stat_ctxs -
16560 bnxt_get_ulp_stat_ctxs_in_use(bp));
16561 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16562 *max_cp = min_t(int, *max_cp, max_irq);
16563 max_ring_grps = hw_resc->max_hw_ring_grps;
16564 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16565 *max_cp -= 1;
16566 *max_rx -= 2;
16567 }
16568 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16569 *max_rx >>= 1;
16570 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16571 int rc;
16572
16573 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16574 if (rc) {
16575 *max_rx = 0;
16576 *max_tx = 0;
16577 }
16578 /* On P5 chips, max_cp output param should be available NQs */
16579 *max_cp = max_irq;
16580 }
16581 *max_rx = min_t(int, *max_rx, max_ring_grps);
16582 }
16583
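/* Report the maximum usable RX and TX rings for this function, trimmed so
 * that completion rings (NQs on P5+ chips) and MSI-X vectors are not
 * oversubscribed.  Returns -ENOMEM if no rings are available.
 */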
16584 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16585 {
16586 int rx, tx, cp;
16587
16588 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16589 *max_rx = rx;
16590 *max_tx = tx;
16591 if (!rx || !tx || !cp)
16592 return -ENOMEM;
16593
16594 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16595 }
16596
16597 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16598 bool shared)
16599 {
16600 int rc;
16601
16602 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16603 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16604 /* Not enough rings, try disabling agg rings. */
16605 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16606 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16607 if (rc) {
16608 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16609 bp->flags |= BNXT_FLAG_AGG_RINGS;
16610 return rc;
16611 }
16612 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16613 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16614 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16615 bnxt_set_ring_params(bp);
16616 }
16617
16618 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16619 int max_cp, max_stat, max_irq;
16620
16621 /* Reserve minimum resources for RoCE */
16622 max_cp = bnxt_get_max_func_cp_rings(bp);
16623 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16624 max_irq = bnxt_get_max_func_irqs(bp);
16625 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16626 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16627 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16628 return 0;
16629
16630 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16631 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16632 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16633 max_cp = min_t(int, max_cp, max_irq);
16634 max_cp = min_t(int, max_cp, max_stat);
16635 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16636 if (rc)
16637 rc = 0;
16638 }
16639 return rc;
16640 }
16641
16642 /* In the initial default shared ring setting, each shared ring must have
16643 * an RX/TX ring pair.
16644 */
16645 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16646 {
16647 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16648 bp->rx_nr_rings = bp->cp_nr_rings;
16649 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16650 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16651 }
16652
16653 static void bnxt_adj_dflt_rings(struct bnxt *bp, bool sh)
16654 {
16655 if (sh)
16656 bnxt_trim_dflt_sh_rings(bp);
16657 else
16658 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16659 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16660 if (sh && READ_ONCE(bp->xdp_prog)) {
16661 bnxt_set_xdp_tx_rings(bp);
16662 bnxt_set_cp_rings(bp, true);
16663 }
16664 }
16665
16666 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16667 {
16668 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16669 int avail_msix;
16670
16671 if (!bnxt_can_reserve_rings(bp))
16672 return 0;
16673
16674 if (sh)
16675 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16676 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16677 /* Reduce default rings on multi-port cards so that total default
16678 * rings do not exceed CPU count.
16679 */
16680 if (bp->port_count > 1) {
16681 int max_rings =
16682 max_t(int, num_online_cpus() / bp->port_count, 1);
16683
16684 dflt_rings = min_t(int, dflt_rings, max_rings);
16685 }
16686 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16687 if (rc)
16688 return rc;
16689 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16690 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16691
16692 bnxt_adj_dflt_rings(bp, sh);
16693
16694 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16695 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16696 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16697
16698 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16699 bnxt_set_dflt_ulp_stat_ctxs(bp);
16700 }
16701
16702 rc = __bnxt_reserve_rings(bp);
16703 if (rc && rc != -ENODEV)
16704 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16705
16706 bnxt_adj_tx_rings(bp);
16707 if (sh)
16708 bnxt_adj_dflt_rings(bp, true);
16709
16710 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16711 if (bnxt_need_reserve_rings(bp)) {
16712 rc = __bnxt_reserve_rings(bp);
16713 if (rc && rc != -ENODEV)
16714 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16715 bnxt_adj_tx_rings(bp);
16716 }
16717 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16718 bp->rx_nr_rings++;
16719 bp->cp_nr_rings++;
16720 }
16721 if (rc) {
16722 bp->tx_nr_rings = 0;
16723 bp->rx_nr_rings = 0;
16724 }
16725 return rc;
16726 }
16727
16728 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16729 {
16730 int rc;
16731
16732 if (bp->tx_nr_rings)
16733 return 0;
16734
16735 bnxt_ulp_irq_stop(bp);
16736 bnxt_clear_int_mode(bp);
16737 rc = bnxt_set_dflt_rings(bp, true);
16738 if (rc) {
16739 if (BNXT_VF(bp) && rc == -ENODEV)
16740 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16741 else
16742 netdev_err(bp->dev, "Not enough rings available.\n");
16743 goto init_dflt_ring_err;
16744 }
16745 rc = bnxt_init_int_mode(bp);
16746 if (rc)
16747 goto init_dflt_ring_err;
16748
16749 bnxt_adj_tx_rings(bp);
16750
16751 bnxt_set_dflt_rfs(bp);
16752
16753 init_dflt_ring_err:
16754 bnxt_ulp_irq_restart(bp, rc);
16755 return rc;
16756 }
16757
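/* Re-query firmware resources and re-initialize the interrupt mode after an
 * event that may have changed resource availability, such as an SR-IOV
 * configuration change.  The device is closed and reopened around the
 * reconfiguration if it was running.
 */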
16758 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16759 {
16760 int rc;
16761
16762 netdev_ops_assert_locked(bp->dev);
16763 bnxt_hwrm_func_qcaps(bp);
16764
16765 if (netif_running(bp->dev))
16766 __bnxt_close_nic(bp, true, false);
16767
16768 bnxt_ulp_irq_stop(bp);
16769 bnxt_clear_int_mode(bp);
16770 rc = bnxt_init_int_mode(bp);
16771 bnxt_ulp_irq_restart(bp, rc);
16772
16773 if (netif_running(bp->dev)) {
16774 if (rc)
16775 netif_close(bp->dev);
16776 else
16777 rc = bnxt_open_nic(bp, true, false);
16778 }
16779
16780 return rc;
16781 }
16782
16783 static int bnxt_init_mac_addr(struct bnxt *bp)
16784 {
16785 int rc = 0;
16786
16787 if (BNXT_PF(bp)) {
16788 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16789 } else {
16790 #ifdef CONFIG_BNXT_SRIOV
16791 struct bnxt_vf_info *vf = &bp->vf;
16792 bool strict_approval = true;
16793
16794 if (is_valid_ether_addr(vf->mac_addr)) {
16795 /* overwrite netdev dev_addr with admin VF MAC */
16796 eth_hw_addr_set(bp->dev, vf->mac_addr);
16797 /* Older PF driver or firmware may not approve this
16798 * correctly.
16799 */
16800 strict_approval = false;
16801 } else {
16802 eth_hw_addr_random(bp->dev);
16803 }
16804 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16805 #endif
16806 }
16807 return rc;
16808 }
16809
16810 static void bnxt_vpd_read_info(struct bnxt *bp)
16811 {
16812 struct pci_dev *pdev = bp->pdev;
16813 unsigned int vpd_size, kw_len;
16814 int pos, size;
16815 u8 *vpd_data;
16816
16817 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16818 if (IS_ERR(vpd_data)) {
16819 pci_warn(pdev, "Unable to read VPD\n");
16820 return;
16821 }
16822
16823 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16824 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16825 if (pos < 0)
16826 goto read_sn;
16827
16828 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16829 memcpy(bp->board_partno, &vpd_data[pos], size);
16830
16831 read_sn:
16832 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16833 PCI_VPD_RO_KEYWORD_SERIALNO,
16834 &kw_len);
16835 if (pos < 0)
16836 goto exit;
16837
16838 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16839 memcpy(bp->board_serialno, &vpd_data[pos], size);
16840 exit:
16841 kfree(vpd_data);
16842 }
16843
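/* Read the PCIe Device Serial Number into @dsn (little-endian) and set
 * BNXT_FLAG_DSN_VALID.  Returns -EOPNOTSUPP if the DSN is unavailable.
 */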
16844 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16845 {
16846 struct pci_dev *pdev = bp->pdev;
16847 u64 qword;
16848
16849 qword = pci_get_dsn(pdev);
16850 if (!qword) {
16851 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16852 return -EOPNOTSUPP;
16853 }
16854
16855 put_unaligned_le64(qword, dsn);
16856
16857 bp->flags |= BNXT_FLAG_DSN_VALID;
16858 return 0;
16859 }
16860
16861 static int bnxt_map_db_bar(struct bnxt *bp)
16862 {
16863 if (!bp->db_size)
16864 return -ENODEV;
16865 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16866 if (!bp->bar1)
16867 return -ENOMEM;
16868 return 0;
16869 }
16870
16871 void bnxt_print_device_info(struct bnxt *bp)
16872 {
16873 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16874 board_info[bp->board_idx].name,
16875 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16876
16877 pcie_print_link_status(bp->pdev);
16878 }
16879
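/* PCI probe handler.  Allocate the netdev, map the device BARs, bring up
 * the HWRM channel, query firmware and PHY capabilities, set up netdev
 * features and default rings, then register the netdev, devlink and
 * auxiliary devices.
 */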
16880 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16881 {
16882 struct bnxt_hw_resc *hw_resc;
16883 struct net_device *dev;
16884 struct bnxt *bp;
16885 int rc, max_irqs;
16886
16887 if (pci_is_bridge(pdev))
16888 return -ENODEV;
16889
16890 if (!pdev->msix_cap) {
16891 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16892 return -ENODEV;
16893 }
16894
16895 /* Clear any pending DMA transactions left over from the crashed kernel
16896 * while loading the driver in the capture (kdump) kernel.
16897 */
16898 if (is_kdump_kernel()) {
16899 pci_clear_master(pdev);
16900 pcie_flr(pdev);
16901 }
16902
16903 max_irqs = bnxt_get_max_irq(pdev);
16904 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16905 max_irqs);
16906 if (!dev)
16907 return -ENOMEM;
16908
16909 bp = netdev_priv(dev);
16910 bp->board_idx = ent->driver_data;
16911 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16912 bnxt_set_max_func_irqs(bp, max_irqs);
16913
16914 if (bnxt_vf_pciid(bp->board_idx))
16915 bp->flags |= BNXT_FLAG_VF;
16916
16917 /* No devlink port registration in case of a VF */
16918 if (BNXT_PF(bp))
16919 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16920
16921 rc = bnxt_init_board(pdev, dev);
16922 if (rc < 0)
16923 goto init_err_free;
16924
16925 dev->netdev_ops = &bnxt_netdev_ops;
16926 dev->xdp_metadata_ops = &bnxt_xdp_metadata_ops;
16927 dev->stat_ops = &bnxt_stat_ops;
16928 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16929 dev->ethtool_ops = &bnxt_ethtool_ops;
16930 pci_set_drvdata(pdev, dev);
16931
16932 rc = bnxt_alloc_hwrm_resources(bp);
16933 if (rc)
16934 goto init_err_pci_clean;
16935
16936 mutex_init(&bp->hwrm_cmd_lock);
16937 mutex_init(&bp->link_lock);
16938
16939 rc = bnxt_fw_init_one_p1(bp);
16940 if (rc)
16941 goto init_err_pci_clean;
16942
16943 if (BNXT_PF(bp))
16944 bnxt_vpd_read_info(bp);
16945
16946 if (BNXT_CHIP_P5_PLUS(bp)) {
16947 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16948 if (BNXT_CHIP_P7(bp))
16949 bp->flags |= BNXT_FLAG_CHIP_P7;
16950 }
16951
16952 rc = bnxt_alloc_rss_indir_tbl(bp);
16953 if (rc)
16954 goto init_err_pci_clean;
16955
16956 rc = bnxt_fw_init_one_p2(bp);
16957 if (rc)
16958 goto init_err_pci_clean;
16959
16960 rc = bnxt_map_db_bar(bp);
16961 if (rc) {
16962 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16963 rc);
16964 goto init_err_pci_clean;
16965 }
16966
16967 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16968 NETIF_F_TSO | NETIF_F_TSO6 |
16969 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16970 NETIF_F_GSO_IPXIP4 |
16971 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16972 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16973 NETIF_F_RXCSUM | NETIF_F_GRO;
16974 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16975
16976 if (BNXT_SUPPORTS_TPA(bp))
16977 dev->hw_features |= NETIF_F_LRO;
16978
16979 dev->hw_enc_features =
16980 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16981 NETIF_F_TSO | NETIF_F_TSO6 |
16982 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16983 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16984 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16985 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16986 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16987 if (bp->flags & BNXT_FLAG_CHIP_P7)
16988 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16989 else
16990 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16991
16992 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16993 NETIF_F_GSO_GRE_CSUM;
16994 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16995 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16996 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16997 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16998 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16999 if (BNXT_SUPPORTS_TPA(bp))
17000 dev->hw_features |= NETIF_F_GRO_HW;
17001 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
17002 if (dev->features & NETIF_F_GRO_HW)
17003 dev->features &= ~NETIF_F_LRO;
17004 dev->priv_flags |= IFF_UNICAST_FLT;
17005
17006 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
17007 if (!(bp->flags & BNXT_FLAG_UDP_GSO_CAP)) {
17008 u16 max_segs = BNXT_SW_USO_MAX_SEGS;
17009
17010 if (bp->tso_max_segs)
17011 max_segs = min_t(u16, max_segs, bp->tso_max_segs);
17012 netif_set_tso_max_segs(dev, max_segs);
17013 } else if (bp->tso_max_segs) {
17014 netif_set_tso_max_segs(dev, bp->tso_max_segs);
17015 }
17016
17017 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
17018 NETDEV_XDP_ACT_RX_SG;
17019
17020 #ifdef CONFIG_BNXT_SRIOV
17021 init_waitqueue_head(&bp->sriov_cfg_wait);
17022 #endif
17023 if (BNXT_SUPPORTS_TPA(bp)) {
17024 bp->gro_func = bnxt_gro_func_5730x;
17025 if (BNXT_CHIP_P4(bp))
17026 bp->gro_func = bnxt_gro_func_5731x;
17027 else if (BNXT_CHIP_P5_PLUS(bp))
17028 bp->gro_func = bnxt_gro_func_5750x;
17029 }
17030 if (!BNXT_CHIP_P4_PLUS(bp))
17031 bp->flags |= BNXT_FLAG_DOUBLE_DB;
17032
17033 rc = bnxt_init_mac_addr(bp);
17034 if (rc) {
17035 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
17036 rc = -EADDRNOTAVAIL;
17037 goto init_err_pci_clean;
17038 }
17039
17040 if (BNXT_PF(bp)) {
17041 /* Read the adapter's DSN to use as the eswitch switch_id */
17042 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
17043 }
17044
17045 /* MTU range: 60 - FW defined max */
17046 dev->min_mtu = ETH_ZLEN;
17047 dev->max_mtu = bp->max_mtu;
17048
17049 rc = bnxt_probe_phy(bp, true);
17050 if (rc)
17051 goto init_err_pci_clean;
17052
17053 hw_resc = &bp->hw_resc;
17054 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
17055 BNXT_L2_FLTR_MAX_FLTR;
17056 /* Older firmware may not report these filters properly */
17057 if (bp->max_fltr < BNXT_MAX_FLTR)
17058 bp->max_fltr = BNXT_MAX_FLTR;
17059 bnxt_init_l2_fltr_tbl(bp);
17060 __bnxt_set_rx_skb_mode(bp, false);
17061 bnxt_set_tpa_flags(bp);
17062 bnxt_init_ring_params(bp);
17063 bnxt_set_ring_params(bp);
17064 mutex_init(&bp->auxdev_lock);
17065 if (!bnxt_auxdev_id_alloc(bp))
17066 bnxt_aux_devices_init(bp);
17067 rc = bnxt_set_dflt_rings(bp, true);
17068 if (rc) {
17069 if (BNXT_VF(bp) && rc == -ENODEV) {
17070 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
17071 } else {
17072 netdev_err(bp->dev, "Not enough rings available.\n");
17073 rc = -ENOMEM;
17074 }
17075 goto init_err_pci_clean;
17076 }
17077
17078 bnxt_fw_init_one_p3(bp);
17079
17080 bnxt_init_dflt_coal(bp);
17081
17082 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
17083 bp->flags |= BNXT_FLAG_STRIP_VLAN;
17084
17085 rc = bnxt_init_int_mode(bp);
17086 if (rc)
17087 goto init_err_pci_clean;
17088
17089 /* No TC has been set yet and rings may have been trimmed due to
17090 * limited MSIX, so we re-initialize the TX rings per TC.
17091 */
17092 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
17093
17094 if (BNXT_PF(bp)) {
17095 if (!bnxt_pf_wq) {
17096 bnxt_pf_wq =
17097 create_singlethread_workqueue("bnxt_pf_wq");
17098 if (!bnxt_pf_wq) {
17099 dev_err(&pdev->dev, "Unable to create workqueue.\n");
17100 rc = -ENOMEM;
17101 goto init_err_pci_clean;
17102 }
17103 }
17104 rc = bnxt_init_tc(bp);
17105 if (rc)
17106 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
17107 rc);
17108 }
17109
17110 bnxt_inv_fw_health_reg(bp);
17111 rc = bnxt_dl_register(bp);
17112 if (rc)
17113 goto init_err_dl;
17114
17115 INIT_LIST_HEAD(&bp->usr_fltr_list);
17116
17117 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
17118 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
17119
17120 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
17121 if (BNXT_SUPPORTS_QUEUE_API(bp))
17122 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
17123 dev->netmem_tx = true;
17124
17125 rc = register_netdev(dev);
17126 if (rc)
17127 goto init_err_cleanup;
17128
17129 bnxt_dl_fw_reporters_create(bp);
17130
17131 bnxt_aux_devices_add(bp);
17132
17133 bnxt_print_device_info(bp);
17134
17135 pci_save_state(pdev);
17136
17137 return 0;
17138 init_err_cleanup:
17139 bnxt_aux_devices_uninit(bp);
17140 bnxt_auxdev_id_free(bp, bp->auxdev_id);
17141 bnxt_dl_unregister(bp);
17142 init_err_dl:
17143 bnxt_shutdown_tc(bp);
17144 bnxt_clear_int_mode(bp);
17145
17146 init_err_pci_clean:
17147 bnxt_hwrm_func_drv_unrgtr(bp);
17148 bnxt_ptp_clear(bp);
17149 kfree(bp->ptp_cfg);
17150 bp->ptp_cfg = NULL;
17151 bnxt_free_hwrm_resources(bp);
17152 bnxt_hwmon_uninit(bp);
17153 bnxt_ethtool_free(bp);
17154 kfree(bp->fw_health);
17155 bp->fw_health = NULL;
17156 bnxt_cleanup_pci(bp);
17157 bnxt_free_ctx_mem(bp, true);
17158 bnxt_free_crash_dump_mem(bp);
17159 kfree(bp->rss_indir_tbl);
17160 bp->rss_indir_tbl = NULL;
17161
17162 init_err_free:
17163 free_netdev(dev);
17164 return rc;
17165 }
17166
17167 static void bnxt_shutdown(struct pci_dev *pdev)
17168 {
17169 struct net_device *dev = pci_get_drvdata(pdev);
17170 struct bnxt *bp;
17171
17172 if (!dev)
17173 return;
17174
17175 rtnl_lock();
17176 netdev_lock(dev);
17177 bp = netdev_priv(dev);
17178 if (!bp)
17179 goto shutdown_exit;
17180
17181 if (netif_running(dev))
17182 netif_close(dev);
17183
17184 if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17185 pcie_flr(pdev);
17186 goto shutdown_exit;
17187 }
17188 bnxt_ptp_clear(bp);
17189 bnxt_clear_int_mode(bp);
17190 pci_disable_device(pdev);
17191
17192 if (system_state == SYSTEM_POWER_OFF) {
17193 pci_wake_from_d3(pdev, bp->wol);
17194 pci_set_power_state(pdev, PCI_D3hot);
17195 }
17196
17197 shutdown_exit:
17198 netdev_unlock(dev);
17199 rtnl_unlock();
17200 }
17201
17202 #ifdef CONFIG_PM_SLEEP
17203 static int bnxt_suspend(struct device *device)
17204 {
17205 struct net_device *dev = dev_get_drvdata(device);
17206 struct bnxt *bp = netdev_priv(dev);
17207 int rc = 0;
17208
17209 bnxt_ulp_stop(bp);
17210
17211 netdev_lock(dev);
17212 if (netif_running(dev)) {
17213 netif_device_detach(dev);
17214 rc = bnxt_close(dev);
17215 }
17216 bnxt_hwrm_func_drv_unrgtr(bp);
17217 bnxt_ptp_clear(bp);
17218 pci_disable_device(bp->pdev);
17219 bnxt_free_ctx_mem(bp, false);
17220 netdev_unlock(dev);
17221 return rc;
17222 }
17223
17224 static int bnxt_resume(struct device *device)
17225 {
17226 struct net_device *dev = dev_get_drvdata(device);
17227 struct bnxt *bp = netdev_priv(dev);
17228 int rc = 0;
17229
17230 netdev_lock(dev);
17231 rc = pci_enable_device(bp->pdev);
17232 if (rc) {
17233 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17234 rc);
17235 goto resume_exit;
17236 }
17237 pci_set_master(bp->pdev);
17238 if (bnxt_hwrm_ver_get(bp)) {
17239 rc = -ENODEV;
17240 goto resume_exit;
17241 }
17242 rc = bnxt_hwrm_func_reset(bp);
17243 if (rc) {
17244 rc = -EBUSY;
17245 goto resume_exit;
17246 }
17247
17248 rc = bnxt_hwrm_func_qcaps(bp);
17249 if (rc)
17250 goto resume_exit;
17251
17252 bnxt_clear_reservations(bp, true);
17253
17254 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17255 rc = -ENODEV;
17256 goto resume_exit;
17257 }
17258 if (bp->fw_crash_mem)
17259 bnxt_hwrm_crash_dump_mem_cfg(bp);
17260
17261 if (bnxt_ptp_init(bp)) {
17262 kfree(bp->ptp_cfg);
17263 bp->ptp_cfg = NULL;
17264 }
17265 bnxt_get_wol_settings(bp);
17266 if (netif_running(dev)) {
17267 rc = bnxt_open(dev);
17268 if (!rc)
17269 netif_device_attach(dev);
17270 }
17271
17272 resume_exit:
17273 netdev_unlock(bp->dev);
17274 if (!rc) {
17275 bnxt_ulp_start(bp);
17276 bnxt_reenable_sriov(bp);
17277 }
17278 return rc;
17279 }
17280
17281 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17282 #define BNXT_PM_OPS (&bnxt_pm_ops)
17283
17284 #else
17285
17286 #define BNXT_PM_OPS NULL
17287
17288 #endif /* CONFIG_PM_SLEEP */
17289
17290 /**
17291 * bnxt_io_error_detected - called when PCI error is detected
17292 * @pdev: Pointer to PCI device
17293 * @state: The current pci connection state
17294 *
17295 * This function is called after a PCI bus error affecting
17296 * this device has been detected.
17297 */
17298 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17299 pci_channel_state_t state)
17300 {
17301 struct net_device *netdev = pci_get_drvdata(pdev);
17302 struct bnxt *bp = netdev_priv(netdev);
17303 bool abort = false;
17304
17305 netdev_info(netdev, "PCI I/O error detected\n");
17306
17307 bnxt_ulp_stop(bp);
17308
17309 netdev_lock(netdev);
17310 netif_device_detach(netdev);
17311
17312 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17313 netdev_err(bp->dev, "Firmware reset already in progress\n");
17314 abort = true;
17315 }
17316
17317 if (abort || state == pci_channel_io_perm_failure) {
17318 netdev_unlock(netdev);
17319 return PCI_ERS_RESULT_DISCONNECT;
17320 }
17321
17322 /* Link is not reliable anymore if state is pci_channel_io_frozen
17323 * so we disable bus master to prevent any potential bad DMAs before
17324 * freeing kernel memory.
17325 */
17326 if (state == pci_channel_io_frozen) {
17327 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17328 bnxt_fw_fatal_close(bp);
17329 }
17330
17331 if (netif_running(netdev))
17332 __bnxt_close_nic(bp, true, true);
17333
17334 if (pci_is_enabled(pdev))
17335 pci_disable_device(pdev);
17336 bnxt_free_ctx_mem(bp, false);
17337 netdev_unlock(netdev);
17338
17339 /* Request a slot reset. */
17340 return PCI_ERS_RESULT_NEED_RESET;
17341 }
17342
17343 /**
17344 * bnxt_io_slot_reset - called after the pci bus has been reset.
17345 * @pdev: Pointer to PCI device
17346 *
17347 * Restart the card from scratch, as if from a cold-boot.
17348 * At this point, the card has experienced a hard reset,
17349 * followed by fixups by BIOS, and has its config space
17350 * set up identically to what it was at cold boot.
17351 */
17352 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17353 {
17354 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17355 struct net_device *netdev = pci_get_drvdata(pdev);
17356 struct bnxt *bp = netdev_priv(netdev);
17357 int retry = 0;
17358 int err = 0;
17359 int off;
17360
17361 netdev_info(bp->dev, "PCI Slot Reset\n");
17362
17363 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17364 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17365 msleep(900);
17366
17367 netdev_lock(netdev);
17368
17369 if (pci_enable_device(pdev)) {
17370 dev_err(&pdev->dev,
17371 "Cannot re-enable PCI device after reset.\n");
17372 } else {
17373 pci_set_master(pdev);
17374 /* Upon a fatal error, the device's internal logic that latches
17375 * the BAR values is reset and is restored only when the BARs
17376 * are rewritten.
17377 *
17378 * Since pci_restore_state() does not rewrite the BARs when the
17379 * values match what was saved earlier, the driver writes the
17380 * BARs to 0 to force a restore after a fatal error.
17381 */
17382 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17383 &bp->state)) {
17384 for (off = PCI_BASE_ADDRESS_0;
17385 off <= PCI_BASE_ADDRESS_5; off += 4)
17386 pci_write_config_dword(bp->pdev, off, 0);
17387 }
17388 pci_restore_state(pdev);
17389 pci_save_state(pdev);
17390
17391 bnxt_inv_fw_health_reg(bp);
17392 bnxt_try_map_fw_health_reg(bp);
17393
17394 /* In some PCIe AER scenarios, firmware may take up to
17395 * 10 seconds to become ready.
17396 */
17397 do {
17398 err = bnxt_try_recover_fw(bp);
17399 if (!err)
17400 break;
17401 retry++;
17402 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17403
17404 if (err) {
17405 dev_err(&pdev->dev, "Firmware not ready\n");
17406 goto reset_exit;
17407 }
17408
17409 err = bnxt_hwrm_func_reset(bp);
17410 if (!err)
17411 result = PCI_ERS_RESULT_RECOVERED;
17412
17413 /* IRQ will be initialized later in bnxt_io_resume */
17414 bnxt_ulp_irq_stop(bp);
17415 bnxt_clear_int_mode(bp);
17416 }
17417
17418 reset_exit:
17419 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17420 bnxt_clear_reservations(bp, true);
17421 netdev_unlock(netdev);
17422
17423 return result;
17424 }
17425
17426 /**
17427 * bnxt_io_resume - called when traffic can start flowing again.
17428 * @pdev: Pointer to PCI device
17429 *
17430 * This callback is called when the error recovery driver tells
17431 * us that it's OK to resume normal operation.
17432 */
17433 static void bnxt_io_resume(struct pci_dev *pdev)
17434 {
17435 struct net_device *netdev = pci_get_drvdata(pdev);
17436 struct bnxt *bp = netdev_priv(netdev);
17437 int err;
17438
17439 netdev_info(bp->dev, "PCI Slot Resume\n");
17440 netdev_lock(netdev);
17441
17442 err = bnxt_hwrm_func_qcaps(bp);
17443 if (!err) {
17444 if (netif_running(netdev)) {
17445 err = bnxt_open(netdev);
17446 } else {
17447 err = bnxt_reserve_rings(bp, true);
17448 if (!err)
17449 err = bnxt_init_int_mode(bp);
17450 }
17451 }
17452
17453 if (!err)
17454 netif_device_attach(netdev);
17455
17456 netdev_unlock(netdev);
17457 if (!err) {
17458 bnxt_ulp_start(bp);
17459 bnxt_reenable_sriov(bp);
17460 }
17461 }
17462
17463 static const struct pci_error_handlers bnxt_err_handler = {
17464 .error_detected = bnxt_io_error_detected,
17465 .slot_reset = bnxt_io_slot_reset,
17466 .resume = bnxt_io_resume
17467 };
17468
17469 static struct pci_driver bnxt_pci_driver = {
17470 .name = DRV_MODULE_NAME,
17471 .id_table = bnxt_pci_tbl,
17472 .probe = bnxt_init_one,
17473 .remove = bnxt_remove_one,
17474 .shutdown = bnxt_shutdown,
17475 .driver.pm = BNXT_PM_OPS,
17476 .err_handler = &bnxt_err_handler,
17477 #if defined(CONFIG_BNXT_SRIOV)
17478 .sriov_configure = bnxt_sriov_configure,
17479 #endif
17480 };
17481
17482 static int __init bnxt_init(void)
17483 {
17484 int err;
17485
17486 bnxt_debug_init();
17487 err = pci_register_driver(&bnxt_pci_driver);
17488 if (err) {
17489 bnxt_debug_exit();
17490 return err;
17491 }
17492
17493 return 0;
17494 }
17495
17496 static void __exit bnxt_exit(void)
17497 {
17498 pci_unregister_driver(&bnxt_pci_driver);
17499 if (bnxt_pf_wq)
17500 destroy_workqueue(bnxt_pf_wq);
17501 bnxt_debug_exit();
17502 }
17503
17504 module_init(bnxt_init);
17505 module_exit(bnxt_exit);
17506