1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77
78 #define BNXT_TX_TIMEOUT (5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
80 NETIF_MSG_TX_ERR)
81
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88
89 #define BNXT_TX_PUSH_THRESH 164
90
91 /* indexed by enum board_idx */
92 static const struct {
93 char *name;
94 } board_info[] = {
95 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 { 0 }
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228
229 static const u16 bnxt_vf_req_snif[] = {
230 HWRM_FUNC_CFG,
231 HWRM_FUNC_VF_CFG,
232 HWRM_PORT_PHY_QCFG,
233 HWRM_CFA_L2_FILTER_ALLOC,
234 };
235
236 static const u16 bnxt_async_events_arr[] = {
237 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255
256 const u16 bnxt_bstore_to_trace[] = {
257 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271
272 static struct workqueue_struct *bnxt_pf_wq;
273
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 .ports = {
280 .src = 0,
281 .dst = 0,
282 },
283 .addrs = {
284 .v6addrs = {
285 .src = BNXT_IPV6_MASK_NONE,
286 .dst = BNXT_IPV6_MASK_NONE,
287 },
288 },
289 };
290
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 .ports = {
293 .src = cpu_to_be16(0xffff),
294 .dst = cpu_to_be16(0xffff),
295 },
296 .addrs = {
297 .v6addrs = {
298 .src = BNXT_IPV6_MASK_ALL,
299 .dst = BNXT_IPV6_MASK_ALL,
300 },
301 },
302 };
303
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 .ports = {
306 .src = cpu_to_be16(0xffff),
307 .dst = cpu_to_be16(0xffff),
308 },
309 .addrs = {
310 .v4addrs = {
311 .src = cpu_to_be32(0xffffffff),
312 .dst = cpu_to_be32(0xffffffff),
313 },
314 },
315 };
316
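/* Return true if the board index identifies a virtual function (VF) device. */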
static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 idx == NETXTREME_E_P7_VF_HV);
324 }
325
326 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328
329 #define BNXT_DB_CQ(db, idx) \
330 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331
332 #define BNXT_DB_NQ_P5(db, idx) \
333 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 (db)->doorbell)
335
336 #define BNXT_DB_NQ_P7(db, idx) \
337 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
338 DB_RING_IDX(db, idx), (db)->doorbell)
339
340 #define BNXT_DB_CQ_ARM(db, idx) \
341 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342
343 #define BNXT_DB_NQ_ARM_P5(db, idx) \
344 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
345 DB_RING_IDX(db, idx), (db)->doorbell)
346
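/* Ring the NQ doorbell for @idx using the doorbell format required by the
 * chip generation (P7, P5+ or the legacy completion ring doorbell).
 */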
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 if (bp->flags & BNXT_FLAG_CHIP_P7)
350 BNXT_DB_NQ_P7(db, idx);
351 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 BNXT_DB_NQ_P5(db, idx);
353 else
354 BNXT_DB_CQ(db, idx);
355 }
356
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 BNXT_DB_NQ_ARM_P5(db, idx);
361 else
362 BNXT_DB_CQ_ARM(db, idx);
363 }
364
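/* Write the completion queue doorbell for @idx; on P5+ chips this also
 * re-arms the CQ (ARMALL).
 */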
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 DB_RING_IDX(db, idx), db->doorbell);
370 else
371 BNXT_DB_CQ(db, idx);
372 }
373
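/* Queue the firmware reset task after @delay, but only while a firmware
 * reset is actually in progress.  The PF uses its dedicated workqueue.
 */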
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 return;
378
379 if (BNXT_PF(bp))
380 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 else
382 schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384
static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 if (BNXT_PF(bp))
388 queue_work(bnxt_pf_wq, &bp->sp_task);
389 else
390 schedule_work(&bp->sp_task);
391 }
392
static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 set_bit(event, &bp->sp_event);
396 __bnxt_queue_sp_work(bp);
397 }
398
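/* Schedule a reset to recover an RX ring after an error and invalidate the
 * expected next consumer index.
 */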
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 if (!rxr->bnapi->in_reset) {
402 rxr->bnapi->in_reset = true;
403 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 else
406 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 __bnxt_queue_sp_work(bp);
408 }
409 rxr->rx_next_cons = 0xffff;
410 }
411
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
414 {
415 struct bnxt_napi *bnapi = txr->bnapi;
416
417 if (bnapi->tx_fault)
418 return;
419
420 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 txr->txq_index, txr->tx_hw_cons,
422 txr->tx_cons, txr->tx_prod, curr);
423 WARN_ON_ONCE(1);
424 bnapi->tx_fault = 1;
425 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427
428 const u16 bnxt_lhint_arr[] = {
429 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 TX_BD_FLAGS_LHINT_512_TO_1023,
431 TX_BD_FLAGS_LHINT_1024_TO_2047,
432 TX_BD_FLAGS_LHINT_1024_TO_2047,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449
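/* Return the CFA action (switch port ID) from the skb's HW port mux
 * metadata dst, or 0 if there is none.
 */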
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 struct metadata_dst *md_dst = skb_metadata_dst(skb);
453
454 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 return 0;
456
457 return md_dst->u.port_info.port_id;
458 }
459
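/* Write the TX doorbell to notify hardware of new BDs and clear any
 * deferred (pending) kick.
 */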
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
462 {
463 /* Sync BD data before updating doorbell */
464 wmb();
465 bnxt_db_write(bp, &txr->tx_db, prod);
466 txr->kick_pending = 0;
467 }
468
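/* Main transmit routine.  Small packets may be pushed inline through the
 * push doorbell; all other packets are DMA mapped and posted as long TX BDs.
 */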
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 struct bnxt *bp = netdev_priv(dev);
472 struct tx_bd *txbd, *txbd0;
473 struct tx_bd_ext *txbd1;
474 struct netdev_queue *txq;
475 int i;
476 dma_addr_t mapping;
477 unsigned int length, pad = 0;
478 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 struct pci_dev *pdev = bp->pdev;
481 u16 prod, last_frag, txts_prod;
482 struct bnxt_tx_ring_info *txr;
483 struct bnxt_sw_tx_bd *tx_buf;
484 __le32 lflags = 0;
485 skb_frag_t *frag;
486
487 i = skb_get_queue_mapping(skb);
488 if (unlikely(i >= bp->tx_nr_rings)) {
489 dev_kfree_skb_any(skb);
490 dev_core_stats_tx_dropped_inc(dev);
491 return NETDEV_TX_OK;
492 }
493
494 txq = netdev_get_tx_queue(dev, i);
495 txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 prod = txr->tx_prod;
497
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
501 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 if (skb_linearize(skb)) {
503 dev_kfree_skb_any(skb);
504 dev_core_stats_tx_dropped_inc(dev);
505 return NETDEV_TX_OK;
506 }
507 }
508 #endif
509 free_size = bnxt_tx_avail(bp, txr);
510 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 /* We must have raced with NAPI cleanup */
512 if (net_ratelimit() && txr->kick_pending)
513 netif_warn(bp, tx_err, dev,
514 "bnxt: ring busy w/ flush pending!\n");
515 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 bp->tx_wake_thresh))
517 return NETDEV_TX_BUSY;
518 }
519
520 length = skb->len;
521 len = skb_headlen(skb);
522 last_frag = skb_shinfo(skb)->nr_frags;
523
524 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
525
526 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
527 tx_buf->skb = skb;
528 tx_buf->nr_frags = last_frag;
529
530 vlan_tag_flags = 0;
531 cfa_action = bnxt_xmit_get_cfa_action(skb);
532 if (skb_vlan_tag_present(skb)) {
533 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
534 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads;
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated.
		 */
538 if (skb->vlan_proto == htons(ETH_P_8021Q))
539 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
540 }
541
542 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
543 ptp->tx_tstamp_en) {
544 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
545 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
546 tx_buf->is_ts_pkt = 1;
547 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
548 } else if (!skb_is_gso(skb)) {
549 u16 seq_id, hdr_off;
550
551 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
552 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
553 if (vlan_tag_flags)
554 hdr_off += VLAN_HLEN;
555 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
556 tx_buf->is_ts_pkt = 1;
557 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
558
559 ptp->txts_req[txts_prod].tx_seqid = seq_id;
560 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
561 tx_buf->txts_prod = txts_prod;
562 }
563 }
564 }
565 if (unlikely(skb->no_fcs))
566 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
567
568 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
569 skb_frags_readable(skb) && !lflags) {
570 struct tx_push_buffer *tx_push_buf = txr->tx_push;
571 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
572 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
573 void __iomem *db = txr->tx_db.doorbell;
574 void *pdata = tx_push_buf->data;
575 u64 *end;
576 int j, push_len;
577
578 /* Set COAL_NOW to be ready quickly for the next push */
579 tx_push->tx_bd_len_flags_type =
580 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
581 TX_BD_TYPE_LONG_TX_BD |
582 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
583 TX_BD_FLAGS_COAL_NOW |
584 TX_BD_FLAGS_PACKET_END |
585 TX_BD_CNT(2));
586
587 if (skb->ip_summed == CHECKSUM_PARTIAL)
588 tx_push1->tx_bd_hsize_lflags =
589 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
590 else
591 tx_push1->tx_bd_hsize_lflags = 0;
592
593 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
594 tx_push1->tx_bd_cfa_action =
595 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
596
597 end = pdata + length;
598 end = PTR_ALIGN(end, 8) - 1;
599 *end = 0;
600
601 skb_copy_from_linear_data(skb, pdata, len);
602 pdata += len;
603 for (j = 0; j < last_frag; j++) {
604 void *fptr;
605
606 frag = &skb_shinfo(skb)->frags[j];
607 fptr = skb_frag_address_safe(frag);
608 if (!fptr)
609 goto normal_tx;
610
611 memcpy(pdata, fptr, skb_frag_size(frag));
612 pdata += skb_frag_size(frag);
613 }
614
615 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
616 txbd->tx_bd_haddr = txr->data_mapping;
617 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
618 prod = NEXT_TX(prod);
619 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
620 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
621 memcpy(txbd, tx_push1, sizeof(*txbd));
622 prod = NEXT_TX(prod);
623 tx_push->doorbell =
624 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
625 DB_RING_IDX(&txr->tx_db, prod));
626 WRITE_ONCE(txr->tx_prod, prod);
627
628 tx_buf->is_push = 1;
629 netdev_tx_sent_queue(txq, skb->len);
630 wmb(); /* Sync is_push and byte queue before pushing data */
631
632 push_len = (length + sizeof(*tx_push) + 7) / 8;
633 if (push_len > 16) {
634 __iowrite64_copy(db, tx_push_buf, 16);
635 __iowrite32_copy(db + 4, tx_push_buf + 1,
636 (push_len - 16) << 1);
637 } else {
638 __iowrite64_copy(db, tx_push_buf, push_len);
639 }
640
641 goto tx_done;
642 }
643
644 normal_tx:
645 if (length < BNXT_MIN_PKT_SIZE) {
646 pad = BNXT_MIN_PKT_SIZE - length;
647 if (skb_pad(skb, pad))
648 /* SKB already freed. */
649 goto tx_kick_pending;
650 length = BNXT_MIN_PKT_SIZE;
651 }
652
653 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
654
655 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
656 goto tx_free;
657
658 dma_unmap_addr_set(tx_buf, mapping, mapping);
659 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
660 TX_BD_CNT(last_frag + 2);
661
662 txbd->tx_bd_haddr = cpu_to_le64(mapping);
663 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
664
665 prod = NEXT_TX(prod);
666 txbd1 = (struct tx_bd_ext *)
667 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
668
669 txbd1->tx_bd_hsize_lflags = lflags;
670 if (skb_is_gso(skb)) {
671 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
672 u32 hdr_len;
673
674 if (skb->encapsulation) {
675 if (udp_gso)
676 hdr_len = skb_inner_transport_offset(skb) +
677 sizeof(struct udphdr);
678 else
679 hdr_len = skb_inner_tcp_all_headers(skb);
680 } else if (udp_gso) {
681 hdr_len = skb_transport_offset(skb) +
682 sizeof(struct udphdr);
683 } else {
684 hdr_len = skb_tcp_all_headers(skb);
685 }
686
687 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
688 TX_BD_FLAGS_T_IPID |
689 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
690 length = skb_shinfo(skb)->gso_size;
691 txbd1->tx_bd_mss = cpu_to_le32(length);
692 length += hdr_len;
693 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
694 txbd1->tx_bd_hsize_lflags |=
695 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
696 txbd1->tx_bd_mss = 0;
697 }
698
699 length >>= 9;
700 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
701 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
702 skb->len);
703 i = 0;
704 goto tx_dma_error;
705 }
706 flags |= bnxt_lhint_arr[length];
707 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
708
709 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
710 txbd1->tx_bd_cfa_action =
711 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
712 txbd0 = txbd;
713 for (i = 0; i < last_frag; i++) {
714 frag = &skb_shinfo(skb)->frags[i];
715 prod = NEXT_TX(prod);
716 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
717
718 len = skb_frag_size(frag);
719 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
720 DMA_TO_DEVICE);
721
722 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
723 goto tx_dma_error;
724
725 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
726 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
727 mapping, mapping);
728
729 txbd->tx_bd_haddr = cpu_to_le64(mapping);
730
731 flags = len << TX_BD_LEN_SHIFT;
732 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
733 }
734
735 flags &= ~TX_BD_LEN;
736 txbd->tx_bd_len_flags_type =
737 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
738 TX_BD_FLAGS_PACKET_END);
739
740 netdev_tx_sent_queue(txq, skb->len);
741
742 skb_tx_timestamp(skb);
743
744 prod = NEXT_TX(prod);
745 WRITE_ONCE(txr->tx_prod, prod);
746
747 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
748 bnxt_txr_db_kick(bp, txr, prod);
749 } else {
750 if (free_size >= bp->tx_wake_thresh)
751 txbd0->tx_bd_len_flags_type |=
752 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
753 txr->kick_pending = 1;
754 }
755
756 tx_done:
757
758 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
759 if (netdev_xmit_more() && !tx_buf->is_push) {
760 txbd0->tx_bd_len_flags_type &=
761 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
762 bnxt_txr_db_kick(bp, txr, prod);
763 }
764
765 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
766 bp->tx_wake_thresh);
767 }
768 return NETDEV_TX_OK;
769
770 tx_dma_error:
771 last_frag = i;
772
773 /* start back at beginning and unmap skb */
774 prod = txr->tx_prod;
775 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
776 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
777 skb_headlen(skb), DMA_TO_DEVICE);
778 prod = NEXT_TX(prod);
779
780 /* unmap remaining mapped pages */
781 for (i = 0; i < last_frag; i++) {
782 prod = NEXT_TX(prod);
783 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
784 frag = &skb_shinfo(skb)->frags[i];
785 netmem_dma_unmap_page_attrs(&pdev->dev,
786 dma_unmap_addr(tx_buf, mapping),
787 skb_frag_size(frag),
788 DMA_TO_DEVICE, 0);
789 }
790
791 tx_free:
792 dev_kfree_skb_any(skb);
793 tx_kick_pending:
794 if (BNXT_TX_PTP_IS_SET(lflags)) {
795 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
796 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
797 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
798 /* set SKB to err so PTP worker will clean up */
799 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
800 }
801 if (txr->kick_pending)
802 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
803 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
804 dev_core_stats_tx_dropped_inc(dev);
805 return NETDEV_TX_OK;
806 }
807
808 /* Returns true if some remaining TX packets not processed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
811 {
812 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
813 struct pci_dev *pdev = bp->pdev;
814 u16 hw_cons = txr->tx_hw_cons;
815 unsigned int tx_bytes = 0;
816 u16 cons = txr->tx_cons;
817 skb_frag_t *frag;
818 int tx_pkts = 0;
819 bool rc = false;
820
821 while (RING_TX(bp, cons) != hw_cons) {
822 struct bnxt_sw_tx_bd *tx_buf;
823 struct sk_buff *skb;
824 bool is_ts_pkt;
825 int j, last;
826
827 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
828 skb = tx_buf->skb;
829
830 if (unlikely(!skb)) {
831 bnxt_sched_reset_txr(bp, txr, cons);
832 return rc;
833 }
834
835 is_ts_pkt = tx_buf->is_ts_pkt;
836 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
837 rc = true;
838 break;
839 }
840
841 cons = NEXT_TX(cons);
842 tx_pkts++;
843 tx_bytes += skb->len;
844 tx_buf->skb = NULL;
845 tx_buf->is_ts_pkt = 0;
846
847 if (tx_buf->is_push) {
848 tx_buf->is_push = 0;
849 goto next_tx_int;
850 }
851
852 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
853 skb_headlen(skb), DMA_TO_DEVICE);
854 last = tx_buf->nr_frags;
855
856 for (j = 0; j < last; j++) {
857 frag = &skb_shinfo(skb)->frags[j];
858 cons = NEXT_TX(cons);
859 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
860 netmem_dma_unmap_page_attrs(&pdev->dev,
861 dma_unmap_addr(tx_buf,
862 mapping),
863 skb_frag_size(frag),
864 DMA_TO_DEVICE, 0);
865 }
866 if (unlikely(is_ts_pkt)) {
867 if (BNXT_CHIP_P5(bp)) {
868 /* PTP worker takes ownership of the skb */
869 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
870 skb = NULL;
871 }
872 }
873
874 next_tx_int:
875 cons = NEXT_TX(cons);
876
877 napi_consume_skb(skb, budget);
878 }
879
880 WRITE_ONCE(txr->tx_cons, cons);
881
882 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
883 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
884 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
885
886 return rc;
887 }
888
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
890 {
891 struct bnxt_tx_ring_info *txr;
892 bool more = false;
893 int i;
894
895 bnxt_for_each_napi_tx(i, bnapi, txr) {
896 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
897 more |= __bnxt_tx_int(bp, txr, budget);
898 }
899 if (!more)
900 bnapi->events &= ~BNXT_TX_CMP_EVENT;
901 }
902
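/* Return true if the RX ring uses a separate page pool for header buffers. */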
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
904 {
905 return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
906 }
907
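/* Allocate an RX buffer page (or a page fragment when the ring buffer is
 * smaller than a page) from the ring's page pool and return its DMA address.
 */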
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
912 {
913 struct page *page;
914
915 if (rxr->rx_page_size < PAGE_SIZE) {
916 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
917 rxr->rx_page_size);
918 } else {
919 page = page_pool_dev_alloc_pages(rxr->page_pool);
920 *offset = 0;
921 }
922 if (!page)
923 return NULL;
924
925 *mapping = page_pool_get_dma_addr(page) + *offset;
926 return page;
927 }
928
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
933 {
934 netmem_ref netmem;
935
936 if (rxr->rx_page_size < PAGE_SIZE) {
937 netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
938 rxr->rx_page_size, gfp);
939 } else {
940 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
941 *offset = 0;
942 }
943 if (!netmem)
944 return 0;
945
946 *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
947 return netmem;
948 }
949
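/* Allocate a small RX data buffer from the head page pool, returning its
 * virtual address and setting *mapping to the DMA address of the data area.
 */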
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       struct bnxt_rx_ring_info *rxr,
				       gfp_t gfp)
953 {
954 unsigned int offset;
955 struct page *page;
956
957 page = page_pool_alloc_frag(rxr->head_pool, &offset,
958 bp->rx_buf_size, gfp);
959 if (!page)
960 return NULL;
961
962 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
963 return page_address(page) + offset;
964 }
965
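/* Allocate and install a new RX buffer at @prod, either as a page (page
 * mode) or as a data buffer (SKB mode), and write its DMA address into
 * the RX BD.
 */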
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
968 {
969 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
970 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
971 dma_addr_t mapping;
972
973 if (BNXT_RX_PAGE_MODE(bp)) {
974 unsigned int offset;
975 struct page *page =
976 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
977
978 if (!page)
979 return -ENOMEM;
980
981 mapping += bp->rx_dma_offset;
982 rx_buf->data = page;
983 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
984 } else {
985 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
986
987 if (!data)
988 return -ENOMEM;
989
990 rx_buf->data = data;
991 rx_buf->data_ptr = data + bp->rx_offset;
992 }
993 rx_buf->mapping = mapping;
994
995 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
996 return 0;
997 }
998
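/* Recycle the buffer at @cons back into the RX ring at the current
 * producer index.
 */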
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1000 {
1001 u16 prod = rxr->rx_prod;
1002 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1003 struct bnxt *bp = rxr->bnapi->bp;
1004 struct rx_bd *cons_bd, *prod_bd;
1005
1006 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1007 cons_rx_buf = &rxr->rx_buf_ring[cons];
1008
1009 prod_rx_buf->data = data;
1010 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1011
1012 prod_rx_buf->mapping = cons_rx_buf->mapping;
1013
1014 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1015 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1016
1017 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1018 }
1019
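/* Find the next free slot in the aggregation buffer bitmap starting at
 * @idx, wrapping around to the beginning if necessary.
 */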
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1021 {
1022 u16 next, max = rxr->rx_agg_bmap_size;
1023
1024 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1025 if (next >= max)
1026 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1027 return next;
1028 }
1029
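/* Allocate a new aggregation buffer (netmem) and install it at @prod in
 * the RX aggregation ring.
 */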
static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
				u16 prod, gfp_t gfp)
1032 {
1033 struct rx_bd *rxbd =
1034 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1035 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1036 u16 sw_prod = rxr->rx_sw_agg_prod;
1037 unsigned int offset = 0;
1038 dma_addr_t mapping;
1039 netmem_ref netmem;
1040
1041 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1042 if (!netmem)
1043 return -ENOMEM;
1044
1045 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1046 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1047
1048 __set_bit(sw_prod, rxr->rx_agg_bmap);
1049 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1050 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1051
1052 rx_agg_buf->netmem = netmem;
1053 rx_agg_buf->offset = offset;
1054 rx_agg_buf->mapping = mapping;
1055 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1056 rxbd->rx_bd_opaque = sw_prod;
1057 return 0;
1058 }
1059
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
1063 {
1064 struct rx_agg_cmp *agg;
1065
1066 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1067 agg = (struct rx_agg_cmp *)
1068 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1069 return agg;
1070 }
1071
static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
1075 {
1076 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1077
1078 return &tpa_info->agg_arr[curr];
1079 }
1080
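/* Recycle @agg_bufs aggregation buffers from a completion (normal or TPA)
 * back onto the RX aggregation ring.
 */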
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
1083 {
1084 struct bnxt_napi *bnapi = cpr->bnapi;
1085 struct bnxt *bp = bnapi->bp;
1086 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1087 u16 prod = rxr->rx_agg_prod;
1088 u16 sw_prod = rxr->rx_sw_agg_prod;
1089 bool p5_tpa = false;
1090 u32 i;
1091
1092 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1093 p5_tpa = true;
1094
1095 for (i = 0; i < agg_bufs; i++) {
1096 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1097 struct rx_agg_cmp *agg;
1098 struct rx_bd *prod_bd;
1099 netmem_ref netmem;
1100 u16 cons;
1101
1102 if (p5_tpa)
1103 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1104 else
1105 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1106 cons = agg->rx_agg_cmp_opaque;
1107 __clear_bit(cons, rxr->rx_agg_bmap);
1108
1109 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1110 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1111
1112 __set_bit(sw_prod, rxr->rx_agg_bmap);
1113 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1114 cons_rx_buf = &rxr->rx_agg_ring[cons];
1115
1116 /* It is possible for sw_prod to be equal to cons, so
1117 * set cons_rx_buf->netmem to 0 first.
1118 */
1119 netmem = cons_rx_buf->netmem;
1120 cons_rx_buf->netmem = 0;
1121 prod_rx_buf->netmem = netmem;
1122 prod_rx_buf->offset = cons_rx_buf->offset;
1123
1124 prod_rx_buf->mapping = cons_rx_buf->mapping;
1125
1126 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1127
1128 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1129 prod_bd->rx_bd_opaque = sw_prod;
1130
1131 prod = NEXT_RX_AGG(prod);
1132 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1133 }
1134 rxr->rx_agg_prod = prod;
1135 rxr->rx_sw_agg_prod = sw_prod;
1136 }
1137
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
1143 {
1144 unsigned int len = offset_and_len & 0xffff;
1145 struct page *page = data;
1146 u16 prod = rxr->rx_prod;
1147 struct sk_buff *skb;
1148 int err;
1149
1150 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1151 if (unlikely(err)) {
1152 bnxt_reuse_rx_data(rxr, cons, data);
1153 return NULL;
1154 }
1155 dma_addr -= bp->rx_dma_offset;
1156 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1157 bp->rx_dir);
1158 skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1159 if (!skb) {
1160 page_pool_recycle_direct(rxr->page_pool, page);
1161 return NULL;
1162 }
1163 skb_mark_for_recycle(skb);
1164 skb_reserve(skb, bp->rx_offset);
1165 __skb_put(skb, len);
1166
1167 return skb;
1168 }
1169
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
1175 {
1176 unsigned int payload = offset_and_len >> 16;
1177 unsigned int len = offset_and_len & 0xffff;
1178 skb_frag_t *frag;
1179 struct page *page = data;
1180 u16 prod = rxr->rx_prod;
1181 struct sk_buff *skb;
1182 int off, err;
1183
1184 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1185 if (unlikely(err)) {
1186 bnxt_reuse_rx_data(rxr, cons, data);
1187 return NULL;
1188 }
1189 dma_addr -= bp->rx_dma_offset;
1190 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1191 bp->rx_dir);
1192
1193 if (unlikely(!payload))
1194 payload = eth_get_headlen(bp->dev, data_ptr, len);
1195
1196 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1197 if (!skb) {
1198 page_pool_recycle_direct(rxr->page_pool, page);
1199 return NULL;
1200 }
1201
1202 skb_mark_for_recycle(skb);
1203 off = (void *)data_ptr - page_address(page);
1204 skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1205 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1206 payload + NET_IP_ALIGN);
1207
1208 frag = &skb_shinfo(skb)->frags[0];
1209 skb_frag_size_sub(frag, payload);
1210 skb_frag_off_add(frag, payload);
1211 skb->data_len -= payload;
1212 skb->tail += payload;
1213
1214 return skb;
1215 }
1216
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
1222 {
1223 u16 prod = rxr->rx_prod;
1224 struct sk_buff *skb;
1225 int err;
1226
1227 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1228 if (unlikely(err)) {
1229 bnxt_reuse_rx_data(rxr, cons, data);
1230 return NULL;
1231 }
1232
1233 skb = napi_build_skb(data, bp->rx_buf_size);
1234 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1235 bp->rx_dir);
1236 if (!skb) {
1237 page_pool_free_va(rxr->head_pool, data, true);
1238 return NULL;
1239 }
1240
1241 skb_mark_for_recycle(skb);
1242 skb_reserve(skb, bp->rx_offset);
1243 skb_put(skb, offset_and_len & 0xffff);
1244 return skb;
1245 }
1246
static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u16 idx, u32 agg_bufs, bool tpa,
				 struct sk_buff *skb,
				 struct xdp_buff *xdp)
1252 {
1253 struct bnxt_napi *bnapi = cpr->bnapi;
1254 struct skb_shared_info *shinfo;
1255 struct bnxt_rx_ring_info *rxr;
1256 u32 i, total_frag_len = 0;
1257 bool p5_tpa = false;
1258 u16 prod;
1259
1260 rxr = bnapi->rx_ring;
1261 prod = rxr->rx_agg_prod;
1262
1263 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1264 p5_tpa = true;
1265
1266 if (skb)
1267 shinfo = skb_shinfo(skb);
1268 else
1269 shinfo = xdp_get_shared_info_from_buff(xdp);
1270
1271 for (i = 0; i < agg_bufs; i++) {
1272 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1273 struct rx_agg_cmp *agg;
1274 u16 cons, frag_len;
1275 netmem_ref netmem;
1276
1277 if (p5_tpa)
1278 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1279 else
1280 agg = bnxt_get_agg(bp, cpr, idx, i);
1281 cons = agg->rx_agg_cmp_opaque;
1282 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1283 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1284
1285 cons_rx_buf = &rxr->rx_agg_ring[cons];
1286 if (skb) {
1287 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1288 cons_rx_buf->offset,
1289 frag_len, rxr->rx_page_size);
1290 } else {
1291 skb_frag_t *frag = &shinfo->frags[i];
1292
1293 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1294 cons_rx_buf->offset,
1295 frag_len);
1296 shinfo->nr_frags = i + 1;
1297 }
1298 __clear_bit(cons, rxr->rx_agg_bmap);
1299
1300 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1301 * a sw_prod index that equals the cons index, so we
1302 * need to clear the cons entry now.
1303 */
1304 netmem = cons_rx_buf->netmem;
1305 cons_rx_buf->netmem = 0;
1306
1307 if (xdp && netmem_is_pfmemalloc(netmem))
1308 xdp_buff_set_frag_pfmemalloc(xdp);
1309
1310 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1311 if (skb) {
1312 skb->len -= frag_len;
1313 skb->data_len -= frag_len;
1314 skb->truesize -= rxr->rx_page_size;
1315 }
1316
1317 --shinfo->nr_frags;
1318 cons_rx_buf->netmem = netmem;
1319
1320 /* Update prod since possibly some netmems have been
1321 * allocated already.
1322 */
1323 rxr->rx_agg_prod = prod;
1324 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1325 return 0;
1326 }
1327
1328 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1329 rxr->rx_page_size);
1330
1331 total_frag_len += frag_len;
1332 prod = NEXT_RX_AGG(prod);
1333 }
1334 rxr->rx_agg_prod = prod;
1335 return total_frag_len;
1336 }
1337
static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
					       struct bnxt_cp_ring_info *cpr,
					       struct sk_buff *skb, u16 idx,
					       u32 agg_bufs, bool tpa)
1342 {
1343 u32 total_frag_len = 0;
1344
1345 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1346 skb, NULL);
1347 if (!total_frag_len) {
1348 skb_mark_for_recycle(skb);
1349 dev_kfree_skb(skb);
1350 return NULL;
1351 }
1352
1353 return skb;
1354 }
1355
static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
				   struct bnxt_cp_ring_info *cpr,
				   struct xdp_buff *xdp, u16 idx,
				   u32 agg_bufs, bool tpa)
1360 {
1361 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1362 u32 total_frag_len = 0;
1363
1364 if (!xdp_buff_has_frags(xdp))
1365 shinfo->nr_frags = 0;
1366
1367 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1368 NULL, xdp);
1369 if (total_frag_len) {
1370 xdp_buff_set_frags_flag(xdp);
1371 shinfo->nr_frags = agg_bufs;
1372 shinfo->xdp_frags_size = total_frag_len;
1373 }
1374 return total_frag_len;
1375 }
1376
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
1379 {
1380 u16 last;
1381 struct rx_agg_cmp *agg;
1382
1383 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1384 last = RING_CMP(*raw_cons);
1385 agg = (struct rx_agg_cmp *)
1386 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1387 return RX_AGG_CMP_VALID(agg, *raw_cons);
1388 }
1389
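/* Allocate a new skb and copy @len bytes of received data into it; used
 * for small packets below the copybreak threshold.
 */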
static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
				      unsigned int len,
				      dma_addr_t mapping)
1393 {
1394 struct bnxt *bp = bnapi->bp;
1395 struct pci_dev *pdev = bp->pdev;
1396 struct sk_buff *skb;
1397
1398 skb = napi_alloc_skb(&bnapi->napi, len);
1399 if (!skb)
1400 return NULL;
1401
1402 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1403 bp->rx_dir);
1404
1405 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1406 len + NET_IP_ALIGN);
1407
1408 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1409 bp->rx_dir);
1410
1411 skb_put(skb, len);
1412
1413 return skb;
1414 }
1415
static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
				     unsigned int len,
				     dma_addr_t mapping)
1419 {
1420 return bnxt_copy_data(bnapi, data, len, mapping);
1421 }
1422
static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
				     struct xdp_buff *xdp,
				     unsigned int len,
				     dma_addr_t mapping)
1427 {
1428 unsigned int metasize = 0;
1429 u8 *data = xdp->data;
1430 struct sk_buff *skb;
1431
1432 len = xdp->data_end - xdp->data_meta;
1433 metasize = xdp->data - xdp->data_meta;
1434 data = xdp->data_meta;
1435
1436 skb = bnxt_copy_data(bnapi, data, len, mapping);
1437 if (!skb)
1438 return skb;
1439
1440 if (metasize) {
1441 skb_metadata_set(skb, metasize);
1442 __skb_pull(skb, metasize);
1443 }
1444
1445 return skb;
1446 }
1447
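/* Advance the raw consumer index past a discarded RX completion, including
 * any aggregation completions.  Returns -EBUSY if the aggregation entries
 * are not all valid yet.
 */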
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
1450 {
1451 struct rx_cmp *rxcmp = cmp;
1452 u32 tmp_raw_cons = *raw_cons;
1453 u8 cmp_type, agg_bufs = 0;
1454
1455 cmp_type = RX_CMP_TYPE(rxcmp);
1456
1457 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1458 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1459 RX_CMP_AGG_BUFS) >>
1460 RX_CMP_AGG_BUFS_SHIFT;
1461 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1462 struct rx_tpa_end_cmp *tpa_end = cmp;
1463
1464 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1465 return 0;
1466
1467 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1468 }
1469
1470 if (agg_bufs) {
1471 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1472 return -EBUSY;
1473 }
1474 *raw_cons = tmp_raw_cons;
1475 return 0;
1476 }
1477
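/* Map a hardware TPA aggregation ID to a free software TPA index and
 * record the mapping.
 */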
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1479 {
1480 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1481 u16 idx = agg_id & MAX_TPA_P5_MASK;
1482
1483 if (test_bit(idx, map->agg_idx_bmap)) {
1484 idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1485 if (idx >= MAX_TPA_P5)
1486 return INVALID_HW_RING_ID;
1487 }
1488 __set_bit(idx, map->agg_idx_bmap);
1489 map->agg_id_tbl[agg_id] = idx;
1490 return idx;
1491 }
1492
static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1494 {
1495 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1496
1497 __clear_bit(idx, map->agg_idx_bmap);
1498 }
1499
static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1501 {
1502 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1503
1504 return map->agg_id_tbl[agg_id];
1505 }
1506
static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
1510 {
1511 tpa_info->cfa_code_valid = 1;
1512 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1513 tpa_info->vlan_valid = 0;
1514 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1515 tpa_info->vlan_valid = 1;
1516 tpa_info->metadata =
1517 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1518 }
1519 }
1520
static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
1524 {
1525 tpa_info->vlan_valid = 0;
1526 if (TPA_START_VLAN_VALID(tpa_start)) {
1527 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1528 u32 vlan_proto = ETH_P_8021Q;
1529
1530 tpa_info->vlan_valid = 1;
1531 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1532 vlan_proto = ETH_P_8021AD;
1533 tpa_info->metadata = vlan_proto << 16 |
1534 TPA_START_METADATA0_TCI(tpa_start1);
1535 }
1536 }
1537
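/* Handle a TPA_START completion: swap the receive buffer into the TPA info
 * structure (the previous TPA buffer takes its place on the ring) and
 * record the length, hash and metadata for the aggregation.
 */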
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 struct bnxt_tpa_info *tpa_info;
1544 u16 cons, prod, agg_id;
1545 struct rx_bd *prod_bd;
1546 dma_addr_t mapping;
1547
1548 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1552 netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1553 rxr->bnapi->index,
1554 TPA_START_AGG_ID_P5(tpa_start));
1555 bnxt_sched_reset_rxr(bp, rxr);
1556 return;
1557 }
1558 } else {
1559 agg_id = TPA_START_AGG_ID(tpa_start);
1560 }
1561 cons = tpa_start->rx_tpa_start_cmp_opaque;
1562 prod = rxr->rx_prod;
1563 cons_rx_buf = &rxr->rx_buf_ring[cons];
1564 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1565 tpa_info = &rxr->rx_tpa[agg_id];
1566
1567 if (unlikely(cons != rxr->rx_next_cons ||
1568 TPA_START_ERROR(tpa_start))) {
1569 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1570 cons, rxr->rx_next_cons,
1571 TPA_START_ERROR_CODE(tpa_start1));
1572 bnxt_sched_reset_rxr(bp, rxr);
1573 return;
1574 }
1575 prod_rx_buf->data = tpa_info->data;
1576 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1577
1578 mapping = tpa_info->mapping;
1579 prod_rx_buf->mapping = mapping;
1580
1581 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1582
1583 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1584
1585 tpa_info->data = cons_rx_buf->data;
1586 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1587 cons_rx_buf->data = NULL;
1588 tpa_info->mapping = cons_rx_buf->mapping;
1589
1590 tpa_info->len =
1591 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1592 RX_TPA_START_CMP_LEN_SHIFT;
1593 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1594 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1595 tpa_info->gso_type = SKB_GSO_TCPV4;
1596 if (TPA_START_IS_IPV6(tpa_start1))
1597 tpa_info->gso_type = SKB_GSO_TCPV6;
1598 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1599 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1600 TPA_START_HASH_TYPE(tpa_start) == 3)
1601 tpa_info->gso_type = SKB_GSO_TCPV6;
1602 tpa_info->rss_hash =
1603 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1604 } else {
1605 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1606 tpa_info->gso_type = 0;
1607 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1608 }
1609 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1610 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1611 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1612 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1613 else
1614 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1615 tpa_info->agg_count = 0;
1616
1617 rxr->rx_prod = NEXT_RX(prod);
1618 cons = RING_RX(bp, NEXT_RX(cons));
1619 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1620 cons_rx_buf = &rxr->rx_buf_ring[cons];
1621
1622 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1623 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1624 cons_rx_buf->data = NULL;
1625 }
1626
1627 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1628 {
1629 if (agg_bufs)
1630 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1631 }
1632
1633 #ifdef CONFIG_INET
1634 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1635 {
1636 struct udphdr *uh = NULL;
1637
1638 if (ip_proto == htons(ETH_P_IP)) {
1639 struct iphdr *iph = (struct iphdr *)skb->data;
1640
1641 if (iph->protocol == IPPROTO_UDP)
1642 uh = (struct udphdr *)(iph + 1);
1643 } else {
1644 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1645
1646 if (iph->nexthdr == IPPROTO_UDP)
1647 uh = (struct udphdr *)(iph + 1);
1648 }
1649 if (uh) {
1650 if (uh->check)
1651 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1652 else
1653 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1654 }
1655 }
1656 #endif
1657
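/* GRO completion fixup for 5731X-class chips.  The inner L2/L3 offsets
 * saved in hdr_info at TPA start time are used to set the network and
 * transport headers and to recompute the TCP pseudo-header checksum so
 * that tcp_gro_complete() can finish the packet.  Internal loopback
 * packets carry an extra 4 bytes, so all offsets are adjusted down by 4
 * when such a packet is detected.
 */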
1658 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1659 int payload_off, int tcp_ts,
1660 struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 struct tcphdr *th;
1664 int len, nw_off;
1665 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1666 u32 hdr_info = tpa_info->hdr_info;
1667 bool loopback = false;
1668
1669 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1670 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1671 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1672
1673 /* If the packet is an internal loopback packet, the offsets will
1674 * have an extra 4 bytes.
1675 */
1676 if (inner_mac_off == 4) {
1677 loopback = true;
1678 } else if (inner_mac_off > 4) {
1679 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1680 ETH_HLEN - 2));
1681
1682 		/* We only support inner IPv4/IPv6. If we don't see the
1683 * correct protocol ID, it must be a loopback packet where
1684 * the offsets are off by 4.
1685 */
1686 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1687 loopback = true;
1688 }
1689 if (loopback) {
1690 		/* internal loopback packet, reduce all offsets by 4 */
1691 inner_ip_off -= 4;
1692 inner_mac_off -= 4;
1693 outer_ip_off -= 4;
1694 }
1695
1696 nw_off = inner_ip_off - ETH_HLEN;
1697 skb_set_network_header(skb, nw_off);
1698 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1699 struct ipv6hdr *iph = ipv6_hdr(skb);
1700
1701 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1702 len = skb->len - skb_transport_offset(skb);
1703 th = tcp_hdr(skb);
1704 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1705 } else {
1706 struct iphdr *iph = ip_hdr(skb);
1707
1708 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1709 len = skb->len - skb_transport_offset(skb);
1710 th = tcp_hdr(skb);
1711 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1712 }
1713
1714 if (inner_mac_off) { /* tunnel */
1715 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1716 ETH_HLEN - 2));
1717
1718 bnxt_gro_tunnel(skb, proto);
1719 }
1720 #endif
1721 return skb;
1722 }
1723
1724 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1725 int payload_off, int tcp_ts,
1726 struct sk_buff *skb)
1727 {
1728 #ifdef CONFIG_INET
1729 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1730 u32 hdr_info = tpa_info->hdr_info;
1731 int iphdr_len, nw_off;
1732
1733 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1734 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1735 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1736
1737 nw_off = inner_ip_off - ETH_HLEN;
1738 skb_set_network_header(skb, nw_off);
1739 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1740 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1741 skb_set_transport_header(skb, nw_off + iphdr_len);
1742
1743 if (inner_mac_off) { /* tunnel */
1744 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1745 ETH_HLEN - 2));
1746
1747 bnxt_gro_tunnel(skb, proto);
1748 }
1749 #endif
1750 return skb;
1751 }
1752
1753 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1754 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1755
1756 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1757 int payload_off, int tcp_ts,
1758 struct sk_buff *skb)
1759 {
1760 #ifdef CONFIG_INET
1761 struct tcphdr *th;
1762 int len, nw_off, tcp_opt_len = 0;
1763
1764 if (tcp_ts)
1765 tcp_opt_len = 12;
1766
1767 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1768 struct iphdr *iph;
1769
1770 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1771 ETH_HLEN;
1772 skb_set_network_header(skb, nw_off);
1773 iph = ip_hdr(skb);
1774 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1775 len = skb->len - skb_transport_offset(skb);
1776 th = tcp_hdr(skb);
1777 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1778 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1779 struct ipv6hdr *iph;
1780
1781 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1782 ETH_HLEN;
1783 skb_set_network_header(skb, nw_off);
1784 iph = ipv6_hdr(skb);
1785 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1786 len = skb->len - skb_transport_offset(skb);
1787 th = tcp_hdr(skb);
1788 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1789 } else {
1790 dev_kfree_skb_any(skb);
1791 return NULL;
1792 }
1793
1794 if (nw_off) /* tunnel */
1795 bnxt_gro_tunnel(skb, skb->protocol);
1796 #endif
1797 return skb;
1798 }
1799
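/* Finish a hardware-aggregated (TPA) packet as a GRO packet: update the
 * HW GRO counters, set the segment count, gso_size and gso_type, let the
 * chip-specific bp->gro_func fix up the headers, then hand the skb to
 * tcp_gro_complete().  Single-segment aggregations are returned unchanged.
 */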
1800 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1801 struct bnxt_tpa_info *tpa_info,
1802 struct rx_tpa_end_cmp *tpa_end,
1803 struct rx_tpa_end_cmp_ext *tpa_end1,
1804 struct sk_buff *skb,
1805 struct bnxt_rx_sw_stats *rx_stats)
1806 {
1807 #ifdef CONFIG_INET
1808 int payload_off;
1809 u16 segs;
1810
1811 segs = TPA_END_TPA_SEGS(tpa_end);
1812 if (segs == 1)
1813 return skb;
1814
1815 rx_stats->rx_hw_gro_packets++;
1816 rx_stats->rx_hw_gro_wire_packets += segs;
1817
1818 NAPI_GRO_CB(skb)->count = segs;
1819 skb_shinfo(skb)->gso_size =
1820 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1821 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1822 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1823 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1824 else
1825 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1826 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1827 if (likely(skb))
1828 tcp_gro_complete(skb);
1829 #endif
1830 return skb;
1831 }
1832
1833 /* Given the cfa_code of a received packet determine which
1834 * netdev (vf-rep or PF) the packet is destined to.
1835 */
1836 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1837 {
1838 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1839
1840 /* if vf-rep dev is NULL, it must belong to the PF */
1841 return dev ? dev : bp->dev;
1842 }
1843
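/* Handle a TPA end completion.  Locate the aggregation context, build an
 * skb from the TPA buffer (small packets are copied; larger ones get a
 * replacement buffer allocated for tpa_info while the skb is built around
 * the old one), attach any aggregation buffers, then apply the VLAN tag,
 * RX hash, checksum status and GRO metadata saved at TPA start time.
 * Returns ERR_PTR(-EBUSY) if the completion ring does not yet hold all
 * the aggregation entries, or NULL if the packet had to be dropped.
 */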
1844 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1845 struct bnxt_cp_ring_info *cpr,
1846 u32 *raw_cons,
1847 struct rx_tpa_end_cmp *tpa_end,
1848 struct rx_tpa_end_cmp_ext *tpa_end1,
1849 u8 *event)
1850 {
1851 struct bnxt_napi *bnapi = cpr->bnapi;
1852 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1853 struct net_device *dev = bp->dev;
1854 u8 *data_ptr, agg_bufs;
1855 unsigned int len;
1856 struct bnxt_tpa_info *tpa_info;
1857 dma_addr_t mapping;
1858 struct sk_buff *skb;
1859 u16 idx = 0, agg_id;
1860 void *data;
1861 bool gro;
1862
1863 if (unlikely(bnapi->in_reset)) {
1864 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1865
1866 if (rc < 0)
1867 return ERR_PTR(-EBUSY);
1868 return NULL;
1869 }
1870
1871 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1872 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1873 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1874 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1875 tpa_info = &rxr->rx_tpa[agg_id];
1876 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1877 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1878 agg_bufs, tpa_info->agg_count);
1879 agg_bufs = tpa_info->agg_count;
1880 }
1881 tpa_info->agg_count = 0;
1882 *event |= BNXT_AGG_EVENT;
1883 bnxt_free_agg_idx(rxr, agg_id);
1884 idx = agg_id;
1885 gro = !!(bp->flags & BNXT_FLAG_GRO);
1886 } else {
1887 agg_id = TPA_END_AGG_ID(tpa_end);
1888 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1889 tpa_info = &rxr->rx_tpa[agg_id];
1890 idx = RING_CMP(*raw_cons);
1891 if (agg_bufs) {
1892 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1893 return ERR_PTR(-EBUSY);
1894
1895 *event |= BNXT_AGG_EVENT;
1896 idx = NEXT_CMP(idx);
1897 }
1898 gro = !!TPA_END_GRO(tpa_end);
1899 }
1900 data = tpa_info->data;
1901 data_ptr = tpa_info->data_ptr;
1902 prefetch(data_ptr);
1903 len = tpa_info->len;
1904 mapping = tpa_info->mapping;
1905
1906 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1907 bnxt_abort_tpa(cpr, idx, agg_bufs);
1908 if (agg_bufs > MAX_SKB_FRAGS)
1909 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1910 agg_bufs, (int)MAX_SKB_FRAGS);
1911 return NULL;
1912 }
1913
1914 if (len <= bp->rx_copybreak) {
1915 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1916 if (!skb) {
1917 bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 cpr->sw_stats->rx.rx_oom_discards += 1;
1919 return NULL;
1920 }
1921 } else {
1922 u8 *new_data;
1923 dma_addr_t new_mapping;
1924
1925 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1926 GFP_ATOMIC);
1927 if (!new_data) {
1928 bnxt_abort_tpa(cpr, idx, agg_bufs);
1929 cpr->sw_stats->rx.rx_oom_discards += 1;
1930 return NULL;
1931 }
1932
1933 tpa_info->data = new_data;
1934 tpa_info->data_ptr = new_data + bp->rx_offset;
1935 tpa_info->mapping = new_mapping;
1936
1937 skb = napi_build_skb(data, bp->rx_buf_size);
1938 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1939 bp->rx_buf_use_size, bp->rx_dir);
1940
1941 if (!skb) {
1942 page_pool_free_va(rxr->head_pool, data, true);
1943 bnxt_abort_tpa(cpr, idx, agg_bufs);
1944 cpr->sw_stats->rx.rx_oom_discards += 1;
1945 return NULL;
1946 }
1947 skb_mark_for_recycle(skb);
1948 skb_reserve(skb, bp->rx_offset);
1949 skb_put(skb, len);
1950 }
1951
1952 if (agg_bufs) {
1953 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1954 true);
1955 if (!skb) {
1956 /* Page reuse already handled by bnxt_rx_pages(). */
1957 cpr->sw_stats->rx.rx_oom_discards += 1;
1958 return NULL;
1959 }
1960 }
1961
1962 if (tpa_info->cfa_code_valid)
1963 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1964 skb->protocol = eth_type_trans(skb, dev);
1965
1966 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1967 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1968
1969 if (tpa_info->vlan_valid &&
1970 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1971 __be16 vlan_proto = htons(tpa_info->metadata >>
1972 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1973 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1974
1975 if (eth_type_vlan(vlan_proto)) {
1976 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1977 } else {
1978 dev_kfree_skb(skb);
1979 return NULL;
1980 }
1981 }
1982
1983 skb_checksum_none_assert(skb);
1984 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1985 skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 skb->csum_level =
1987 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1988 }
1989
1990 if (gro)
1991 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
1992 &cpr->sw_stats->rx);
1993
1994 return skb;
1995 }
1996
1997 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1998 struct rx_agg_cmp *rx_agg)
1999 {
2000 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
2001 struct bnxt_tpa_info *tpa_info;
2002
2003 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2004 tpa_info = &rxr->rx_tpa[agg_id];
2005 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2006 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2007 }
2008
2009 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2010 struct sk_buff *skb)
2011 {
2012 skb_mark_for_recycle(skb);
2013
2014 if (skb->dev != bp->dev) {
2015 /* this packet belongs to a vf-rep */
2016 bnxt_vf_rep_rx(bp, skb);
2017 return;
2018 }
2019 skb_record_rx_queue(skb, bnapi->index);
2020 napi_gro_receive(&bnapi->napi, skb);
2021 }
2022
2023 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2024 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2025 {
2026 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2027
2028 if (BNXT_PTP_RX_TS_VALID(flags))
2029 goto ts_valid;
2030 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2031 return false;
2032
2033 ts_valid:
2034 *cmpl_ts = ts;
2035 return true;
2036 }
2037
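/* Extract the VLAN tag from an L2 RX completion and attach it to the skb.
 * Legacy completions carry the TPID and TCI in the flags2 metadata word;
 * V3 completions use the TPID select and metadata0 fields instead.  The
 * skb is freed and NULL is returned if the TPID is not a recognized VLAN
 * ethertype.
 */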
2038 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2039 struct rx_cmp *rxcmp,
2040 struct rx_cmp_ext *rxcmp1)
2041 {
2042 __be16 vlan_proto;
2043 u16 vtag;
2044
2045 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2046 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2047 u32 meta_data;
2048
2049 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2050 return skb;
2051
2052 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2053 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2054 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2055 if (eth_type_vlan(vlan_proto))
2056 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2057 else
2058 goto vlan_err;
2059 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2060 if (RX_CMP_VLAN_VALID(rxcmp)) {
2061 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2062
2063 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2064 vlan_proto = htons(ETH_P_8021Q);
2065 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2066 vlan_proto = htons(ETH_P_8021AD);
2067 else
2068 goto vlan_err;
2069 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2070 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2071 }
2072 }
2073 return skb;
2074 vlan_err:
2075 skb_mark_for_recycle(skb);
2076 dev_kfree_skb(skb);
2077 return NULL;
2078 }
2079
2080 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2081 struct rx_cmp *rxcmp)
2082 {
2083 u8 ext_op;
2084
2085 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2086 switch (ext_op) {
2087 case EXT_OP_INNER_4:
2088 case EXT_OP_OUTER_4:
2089 case EXT_OP_INNFL_3:
2090 case EXT_OP_OUTFL_3:
2091 return PKT_HASH_TYPE_L4;
2092 default:
2093 return PKT_HASH_TYPE_L3;
2094 }
2095 }
2096
2097 /* returns the following:
2098 * 1 - 1 packet successfully received
2099 * 0 - successful TPA_START, packet not completed yet
2100 * -EBUSY - completion ring does not have all the agg buffers yet
2101 * -ENOMEM - packet aborted due to out of memory
2102 * -EIO - packet aborted due to hw error indicated in BD
2103 */
2104 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2105 u32 *raw_cons, u8 *event)
2106 {
2107 struct bnxt_napi *bnapi = cpr->bnapi;
2108 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2109 struct net_device *dev = bp->dev;
2110 struct rx_cmp *rxcmp;
2111 struct rx_cmp_ext *rxcmp1;
2112 u32 tmp_raw_cons = *raw_cons;
2113 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2114 struct skb_shared_info *sinfo;
2115 struct bnxt_sw_rx_bd *rx_buf;
2116 unsigned int len;
2117 u8 *data_ptr, agg_bufs, cmp_type;
2118 bool xdp_active = false;
2119 dma_addr_t dma_addr;
2120 struct sk_buff *skb;
2121 struct xdp_buff xdp;
2122 u32 flags, misc;
2123 u32 cmpl_ts;
2124 void *data;
2125 int rc = 0;
2126
2127 rxcmp = (struct rx_cmp *)
2128 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2129
2130 cmp_type = RX_CMP_TYPE(rxcmp);
2131
2132 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2133 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2134 goto next_rx_no_prod_no_len;
2135 }
2136
2137 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2138 cp_cons = RING_CMP(tmp_raw_cons);
2139 rxcmp1 = (struct rx_cmp_ext *)
2140 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2141
2142 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2143 return -EBUSY;
2144
2145 /* The valid test of the entry must be done first before
2146 * reading any further.
2147 */
2148 dma_rmb();
2149 prod = rxr->rx_prod;
2150
2151 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2152 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2153 bnxt_tpa_start(bp, rxr, cmp_type,
2154 (struct rx_tpa_start_cmp *)rxcmp,
2155 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2156
2157 *event |= BNXT_RX_EVENT;
2158 goto next_rx_no_prod_no_len;
2159
2160 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2161 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2162 (struct rx_tpa_end_cmp *)rxcmp,
2163 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2164
2165 if (IS_ERR(skb))
2166 return -EBUSY;
2167
2168 rc = -ENOMEM;
2169 if (likely(skb)) {
2170 bnxt_deliver_skb(bp, bnapi, skb);
2171 rc = 1;
2172 }
2173 *event |= BNXT_RX_EVENT;
2174 goto next_rx_no_prod_no_len;
2175 }
2176
2177 cons = rxcmp->rx_cmp_opaque;
2178 if (unlikely(cons != rxr->rx_next_cons)) {
2179 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2180
2181 /* 0xffff is forced error, don't print it */
2182 if (rxr->rx_next_cons != 0xffff)
2183 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2184 cons, rxr->rx_next_cons);
2185 bnxt_sched_reset_rxr(bp, rxr);
2186 if (rc1)
2187 return rc1;
2188 goto next_rx_no_prod_no_len;
2189 }
2190 rx_buf = &rxr->rx_buf_ring[cons];
2191 data = rx_buf->data;
2192 data_ptr = rx_buf->data_ptr;
2193 prefetch(data_ptr);
2194
2195 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2196 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2197
2198 if (agg_bufs) {
2199 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2200 return -EBUSY;
2201
2202 cp_cons = NEXT_CMP(cp_cons);
2203 *event |= BNXT_AGG_EVENT;
2204 }
2205 *event |= BNXT_RX_EVENT;
2206
2207 rx_buf->data = NULL;
2208 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2209 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2210
2211 bnxt_reuse_rx_data(rxr, cons, data);
2212 if (agg_bufs)
2213 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2214 false);
2215
2216 rc = -EIO;
2217 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2218 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2219 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2220 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2221 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2222 rx_err);
2223 bnxt_sched_reset_rxr(bp, rxr);
2224 }
2225 }
2226 goto next_rx_no_len;
2227 }
2228
2229 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2230 len = flags >> RX_CMP_LEN_SHIFT;
2231 dma_addr = rx_buf->mapping;
2232
2233 if (bnxt_xdp_attached(bp, rxr)) {
2234 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2235 if (agg_bufs) {
2236 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2237 cp_cons,
2238 agg_bufs,
2239 false);
2240 if (!frag_len)
2241 goto oom_next_rx;
2242
2243 }
2244 xdp_active = true;
2245 }
2246
2247 if (xdp_active) {
2248 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2249 rc = 1;
2250 goto next_rx;
2251 }
2252 if (xdp_buff_has_frags(&xdp)) {
2253 sinfo = xdp_get_shared_info_from_buff(&xdp);
2254 agg_bufs = sinfo->nr_frags;
2255 } else {
2256 agg_bufs = 0;
2257 }
2258 }
2259
2260 if (len <= bp->rx_copybreak) {
2261 if (!xdp_active)
2262 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2263 else
2264 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2265 bnxt_reuse_rx_data(rxr, cons, data);
2266 if (!skb) {
2267 if (agg_bufs) {
2268 if (!xdp_active)
2269 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2270 agg_bufs, false);
2271 else
2272 bnxt_xdp_buff_frags_free(rxr, &xdp);
2273 }
2274 goto oom_next_rx;
2275 }
2276 } else {
2277 u32 payload;
2278
2279 if (rx_buf->data_ptr == data_ptr)
2280 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2281 else
2282 payload = 0;
2283 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2284 payload | len);
2285 if (!skb)
2286 goto oom_next_rx;
2287 }
2288
2289 if (agg_bufs) {
2290 if (!xdp_active) {
2291 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2292 agg_bufs, false);
2293 if (!skb)
2294 goto oom_next_rx;
2295 } else {
2296 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
2297 if (!skb) {
2298 /* we should be able to free the old skb here */
2299 bnxt_xdp_buff_frags_free(rxr, &xdp);
2300 goto oom_next_rx;
2301 }
2302 }
2303 }
2304
2305 if (RX_CMP_HASH_VALID(rxcmp)) {
2306 enum pkt_hash_types type;
2307
2308 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2309 type = bnxt_rss_ext_op(bp, rxcmp);
2310 } else {
2311 u32 itypes = RX_CMP_ITYPES(rxcmp);
2312
2313 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2314 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2315 type = PKT_HASH_TYPE_L4;
2316 else
2317 type = PKT_HASH_TYPE_L3;
2318 }
2319 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2320 }
2321
2322 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2323 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2324 skb->protocol = eth_type_trans(skb, dev);
2325
2326 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2327 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2328 if (!skb)
2329 goto next_rx;
2330 }
2331
2332 skb_checksum_none_assert(skb);
2333 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2334 if (dev->features & NETIF_F_RXCSUM) {
2335 skb->ip_summed = CHECKSUM_UNNECESSARY;
2336 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2337 }
2338 } else {
2339 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2340 if (dev->features & NETIF_F_RXCSUM)
2341 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2342 }
2343 }
2344
2345 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2346 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2347 u64 ns, ts;
2348
2349 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2350 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2351
2352 ns = bnxt_timecounter_cyc2time(ptp, ts);
2353 memset(skb_hwtstamps(skb), 0,
2354 sizeof(*skb_hwtstamps(skb)));
2355 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2356 }
2357 }
2358 }
2359 bnxt_deliver_skb(bp, bnapi, skb);
2360 rc = 1;
2361
2362 next_rx:
2363 cpr->rx_packets += 1;
2364 cpr->rx_bytes += len;
2365
2366 next_rx_no_len:
2367 rxr->rx_prod = NEXT_RX(prod);
2368 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2369
2370 next_rx_no_prod_no_len:
2371 *raw_cons = tmp_raw_cons;
2372
2373 return rc;
2374
2375 oom_next_rx:
2376 cpr->sw_stats->rx.rx_oom_discards += 1;
2377 rc = -ENOMEM;
2378 goto next_rx;
2379 }
2380
2381 /* In netpoll mode, if we are using a combined completion ring, we need to
2382 * discard the rx packets and recycle the buffers.
2383 */
2384 static int bnxt_force_rx_discard(struct bnxt *bp,
2385 struct bnxt_cp_ring_info *cpr,
2386 u32 *raw_cons, u8 *event)
2387 {
2388 u32 tmp_raw_cons = *raw_cons;
2389 struct rx_cmp_ext *rxcmp1;
2390 struct rx_cmp *rxcmp;
2391 u16 cp_cons;
2392 u8 cmp_type;
2393 int rc;
2394
2395 cp_cons = RING_CMP(tmp_raw_cons);
2396 rxcmp = (struct rx_cmp *)
2397 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2398
2399 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2400 cp_cons = RING_CMP(tmp_raw_cons);
2401 rxcmp1 = (struct rx_cmp_ext *)
2402 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2403
2404 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2405 return -EBUSY;
2406
2407 /* The valid test of the entry must be done first before
2408 * reading any further.
2409 */
2410 dma_rmb();
2411 cmp_type = RX_CMP_TYPE(rxcmp);
2412 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2413 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2414 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2415 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2416 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2417 struct rx_tpa_end_cmp_ext *tpa_end1;
2418
2419 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2420 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2421 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2422 }
2423 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2424 if (rc && rc != -EBUSY)
2425 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2426 return rc;
2427 }
2428
2429 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2430 {
2431 struct bnxt_fw_health *fw_health = bp->fw_health;
2432 u32 reg = fw_health->regs[reg_idx];
2433 u32 reg_type, reg_off, val = 0;
2434
2435 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2436 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2437 switch (reg_type) {
2438 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2439 pci_read_config_dword(bp->pdev, reg_off, &val);
2440 break;
2441 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2442 reg_off = fw_health->mapped_regs[reg_idx];
2443 fallthrough;
2444 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2445 val = readl(bp->bar0 + reg_off);
2446 break;
2447 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2448 val = readl(bp->bar1 + reg_off);
2449 break;
2450 }
2451 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2452 val &= fw_health->fw_reset_inprog_reg_mask;
2453 return val;
2454 }
2455
2456 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2457 {
2458 int i;
2459
2460 for (i = 0; i < bp->rx_nr_rings; i++) {
2461 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2462 struct bnxt_ring_grp_info *grp_info;
2463
2464 grp_info = &bp->grp_info[grp_idx];
2465 if (grp_info->agg_fw_ring_id == ring_id)
2466 return grp_idx;
2467 }
2468 return INVALID_HW_RING_ID;
2469 }
2470
2471 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2472 {
2473 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2474
2475 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2476 return link_info->force_link_speed2;
2477 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2478 return link_info->force_pam4_link_speed;
2479 return link_info->force_link_speed;
2480 }
2481
2482 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2483 {
2484 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2485
2486 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2487 link_info->req_link_speed = link_info->force_link_speed2;
2488 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2489 switch (link_info->req_link_speed) {
2490 case BNXT_LINK_SPEED_50GB_PAM4:
2491 case BNXT_LINK_SPEED_100GB_PAM4:
2492 case BNXT_LINK_SPEED_200GB_PAM4:
2493 case BNXT_LINK_SPEED_400GB_PAM4:
2494 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2495 break;
2496 case BNXT_LINK_SPEED_100GB_PAM4_112:
2497 case BNXT_LINK_SPEED_200GB_PAM4_112:
2498 case BNXT_LINK_SPEED_400GB_PAM4_112:
2499 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2500 break;
2501 default:
2502 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2503 }
2504 return;
2505 }
2506 link_info->req_link_speed = link_info->force_link_speed;
2507 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2508 if (link_info->force_pam4_link_speed) {
2509 link_info->req_link_speed = link_info->force_pam4_link_speed;
2510 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2511 }
2512 }
2513
2514 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2515 {
2516 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2517
2518 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2519 link_info->advertising = link_info->auto_link_speeds2;
2520 return;
2521 }
2522 link_info->advertising = link_info->auto_link_speeds;
2523 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2524 }
2525
2526 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2527 {
2528 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2529
2530 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2531 if (link_info->req_link_speed != link_info->force_link_speed2)
2532 return true;
2533 return false;
2534 }
2535 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2536 link_info->req_link_speed != link_info->force_link_speed)
2537 return true;
2538 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2539 link_info->req_link_speed != link_info->force_pam4_link_speed)
2540 return true;
2541 return false;
2542 }
2543
2544 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2545 {
2546 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2547
2548 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2549 if (link_info->advertising != link_info->auto_link_speeds2)
2550 return true;
2551 return false;
2552 }
2553 if (link_info->advertising != link_info->auto_link_speeds ||
2554 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2555 return true;
2556 return false;
2557 }
2558
2559 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2560 {
2561 u32 flags = bp->ctx->ctx_arr[type].flags;
2562
2563 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2564 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2565 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2566 }
2567
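/* Set up the driver-side state for one firmware trace buffer: record the
 * context and trace type and compute the address of the last byte of the
 * buffer (walking the indirect page table when the buffer spans more than
 * MAX_CTX_PAGES pages).  A magic byte is written there, presumably so a
 * later overwrite by the firmware lets bnxt_bs_trace_check_wrap() detect
 * that the buffer has wrapped.
 */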
2568 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2569 {
2570 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2571 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2572 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2573 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2574 struct bnxt_bs_trace_info *bs_trace;
2575 int last_pg;
2576
2577 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2578 return;
2579
2580 mem_size = ctxm->max_entries * ctxm->entry_size;
2581 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2582 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2583
2584 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2585 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2586
2587 rmem = &ctx_pg[0].ring_mem;
2588 bs_trace = &bp->bs_trace[trace_type];
2589 bs_trace->ctx_type = ctxm->type;
2590 bs_trace->trace_type = trace_type;
2591 if (pages > MAX_CTX_PAGES) {
2592 int last_pg_dir = rmem->nr_pages - 1;
2593
2594 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2595 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2596 } else {
2597 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2598 }
2599 bs_trace->magic_byte += magic_byte_offset;
2600 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2601 }
2602
2603 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2604 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2605 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2606
2607 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2608 (((data2) & \
2609 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2610 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2611
2612 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2613 ((data2) & \
2614 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2615
2616 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2617 (((data2) & \
2618 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2619 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2620
2621 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2622 ((data1) & \
2623 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2624
2625 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2626 (((data1) & \
2627 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2628 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2629
2630 /* Return true if the workqueue has to be scheduled */
2631 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2632 {
2633 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2634
2635 switch (err_type) {
2636 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2637 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2638 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2639 break;
2640 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2641 netdev_warn(bp->dev, "Pause Storm detected!\n");
2642 break;
2643 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2644 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2645 break;
2646 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2647 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2648 char *threshold_type;
2649 bool notify = false;
2650 char *dir_str;
2651
2652 switch (type) {
2653 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2654 threshold_type = "warning";
2655 break;
2656 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2657 threshold_type = "critical";
2658 break;
2659 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2660 threshold_type = "fatal";
2661 break;
2662 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2663 threshold_type = "shutdown";
2664 break;
2665 default:
2666 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2667 return false;
2668 }
2669 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2670 dir_str = "above";
2671 notify = true;
2672 } else {
2673 dir_str = "below";
2674 }
2675 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2676 dir_str, threshold_type);
2677 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2678 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2679 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2680 if (notify) {
2681 bp->thermal_threshold_type = type;
2682 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2683 return true;
2684 }
2685 return false;
2686 }
2687 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2688 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2689 break;
2690 default:
2691 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2692 err_type);
2693 break;
2694 }
2695 return false;
2696 }
2697
2698 #define BNXT_GET_EVENT_PORT(data) \
2699 ((data) & \
2700 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2701
2702 #define BNXT_EVENT_RING_TYPE(data2) \
2703 ((data2) & \
2704 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2705
2706 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2707 (BNXT_EVENT_RING_TYPE(data2) == \
2708 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2709
2710 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2711 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2712 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2713
2714 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2715 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2716 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2717
2718 #define BNXT_PHC_BITS 48
2719
2720 static int bnxt_async_event_process(struct bnxt *bp,
2721 struct hwrm_async_event_cmpl *cmpl)
2722 {
2723 u16 event_id = le16_to_cpu(cmpl->event_id);
2724 u32 data1 = le32_to_cpu(cmpl->event_data1);
2725 u32 data2 = le32_to_cpu(cmpl->event_data2);
2726
2727 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2728 event_id, data1, data2);
2729
2730 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2731 switch (event_id) {
2732 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2733 struct bnxt_link_info *link_info = &bp->link_info;
2734
2735 if (BNXT_VF(bp))
2736 goto async_event_process_exit;
2737
2738 /* print unsupported speed warning in forced speed mode only */
2739 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2740 (data1 & 0x20000)) {
2741 u16 fw_speed = bnxt_get_force_speed(link_info);
2742 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2743
2744 if (speed != SPEED_UNKNOWN)
2745 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2746 speed);
2747 }
2748 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2749 }
2750 fallthrough;
2751 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2752 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2753 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2754 fallthrough;
2755 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2756 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2757 break;
2758 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2759 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2760 break;
2761 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2762 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2763
2764 if (BNXT_VF(bp))
2765 break;
2766
2767 if (bp->pf.port_id != port_id)
2768 break;
2769
2770 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2771 break;
2772 }
2773 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2774 if (BNXT_PF(bp))
2775 goto async_event_process_exit;
2776 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2777 break;
2778 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2779 char *type_str = "Solicited";
2780
2781 if (!bp->fw_health)
2782 goto async_event_process_exit;
2783
2784 bp->fw_reset_timestamp = jiffies;
2785 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2786 if (!bp->fw_reset_min_dsecs)
2787 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2788 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2789 if (!bp->fw_reset_max_dsecs)
2790 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2791 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2792 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2793 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2794 type_str = "Fatal";
2795 bp->fw_health->fatalities++;
2796 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2797 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2798 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2799 type_str = "Non-fatal";
2800 bp->fw_health->survivals++;
2801 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2802 }
2803 netif_warn(bp, hw, bp->dev,
2804 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2805 type_str, data1, data2,
2806 bp->fw_reset_min_dsecs * 100,
2807 bp->fw_reset_max_dsecs * 100);
2808 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2809 break;
2810 }
2811 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2812 struct bnxt_fw_health *fw_health = bp->fw_health;
2813 char *status_desc = "healthy";
2814 u32 status;
2815
2816 if (!fw_health)
2817 goto async_event_process_exit;
2818
2819 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2820 fw_health->enabled = false;
2821 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2822 break;
2823 }
2824 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2825 fw_health->tmr_multiplier =
2826 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2827 bp->current_interval * 10);
2828 fw_health->tmr_counter = fw_health->tmr_multiplier;
2829 if (!fw_health->enabled)
2830 fw_health->last_fw_heartbeat =
2831 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2832 fw_health->last_fw_reset_cnt =
2833 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2834 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2835 if (status != BNXT_FW_STATUS_HEALTHY)
2836 status_desc = "unhealthy";
2837 netif_info(bp, drv, bp->dev,
2838 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2839 fw_health->primary ? "primary" : "backup", status,
2840 status_desc, fw_health->last_fw_reset_cnt);
2841 if (!fw_health->enabled) {
2842 /* Make sure tmr_counter is set and visible to
2843 * bnxt_health_check() before setting enabled to true.
2844 */
2845 smp_wmb();
2846 fw_health->enabled = true;
2847 }
2848 goto async_event_process_exit;
2849 }
2850 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2851 netif_notice(bp, hw, bp->dev,
2852 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2853 data1, data2);
2854 goto async_event_process_exit;
2855 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2856 struct bnxt_rx_ring_info *rxr;
2857 u16 grp_idx;
2858
2859 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2860 goto async_event_process_exit;
2861
2862 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2863 BNXT_EVENT_RING_TYPE(data2), data1);
2864 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2865 goto async_event_process_exit;
2866
2867 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2868 if (grp_idx == INVALID_HW_RING_ID) {
2869 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2870 data1);
2871 goto async_event_process_exit;
2872 }
2873 rxr = bp->bnapi[grp_idx]->rx_ring;
2874 bnxt_sched_reset_rxr(bp, rxr);
2875 goto async_event_process_exit;
2876 }
2877 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2878 struct bnxt_fw_health *fw_health = bp->fw_health;
2879
2880 netif_notice(bp, hw, bp->dev,
2881 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2882 data1, data2);
2883 if (fw_health) {
2884 fw_health->echo_req_data1 = data1;
2885 fw_health->echo_req_data2 = data2;
2886 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2887 break;
2888 }
2889 goto async_event_process_exit;
2890 }
2891 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2892 bnxt_ptp_pps_event(bp, data1, data2);
2893 goto async_event_process_exit;
2894 }
2895 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2896 if (bnxt_event_error_report(bp, data1, data2))
2897 break;
2898 goto async_event_process_exit;
2899 }
2900 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2901 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2902 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2903 if (BNXT_PTP_USE_RTC(bp)) {
2904 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2905 unsigned long flags;
2906 u64 ns;
2907
2908 if (!ptp)
2909 goto async_event_process_exit;
2910
2911 bnxt_ptp_update_current_time(bp);
2912 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2913 BNXT_PHC_BITS) | ptp->current_time);
2914 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2915 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2916 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2917 }
2918 break;
2919 }
2920 goto async_event_process_exit;
2921 }
2922 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2923 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2924
2925 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2926 goto async_event_process_exit;
2927 }
2928 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2929 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2930 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2931
2932 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2933 goto async_event_process_exit;
2934 }
2935 default:
2936 goto async_event_process_exit;
2937 }
2938 __bnxt_queue_sp_work(bp);
2939 async_event_process_exit:
2940 bnxt_ulp_async_events(bp, cmpl);
2941 return 0;
2942 }
2943
2944 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2945 {
2946 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2947 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2948 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2949 (struct hwrm_fwd_req_cmpl *)txcmp;
2950
2951 switch (cmpl_type) {
2952 case CMPL_BASE_TYPE_HWRM_DONE:
2953 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2954 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2955 break;
2956
2957 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2958 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2959
2960 if ((vf_id < bp->pf.first_vf_id) ||
2961 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2962 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2963 vf_id);
2964 return -EINVAL;
2965 }
2966
2967 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2968 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2969 break;
2970
2971 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2972 bnxt_async_event_process(bp,
2973 (struct hwrm_async_event_cmpl *)txcmp);
2974 break;
2975
2976 default:
2977 break;
2978 }
2979
2980 return 0;
2981 }
2982
2983 static bool bnxt_vnic_is_active(struct bnxt *bp)
2984 {
2985 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2986
2987 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2988 }
2989
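/* MSI-X interrupt handler: count the event, prefetch the next completion
 * descriptor and defer the actual ring processing to NAPI.
 */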
2990 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2991 {
2992 struct bnxt_napi *bnapi = dev_instance;
2993 struct bnxt *bp = bnapi->bp;
2994 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2995 u32 cons = RING_CMP(cpr->cp_raw_cons);
2996
2997 cpr->event_ctr++;
2998 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2999 napi_schedule(&bnapi->napi);
3000 return IRQ_HANDLED;
3001 }
3002
3003 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3004 {
3005 u32 raw_cons = cpr->cp_raw_cons;
3006 u16 cons = RING_CMP(raw_cons);
3007 struct tx_cmp *txcmp;
3008
3009 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3010
3011 return TX_CMP_VALID(txcmp, raw_cons);
3012 }
3013
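/* Core completion ring processing loop shared by the NAPI poll variants.
 * Walk the completion ring until it is empty or the budget is exhausted,
 * dispatching TX, RX/TPA and HWRM completions, then flush pending XDP
 * redirects and write the TX doorbell if the RX path produced TX BDs.
 * Returns the number of RX packets processed; cpr->has_more_work is set
 * if the budget ran out first.
 */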
3014 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3015 int budget)
3016 {
3017 struct bnxt_napi *bnapi = cpr->bnapi;
3018 u32 raw_cons = cpr->cp_raw_cons;
3019 bool flush_xdp = false;
3020 u32 cons;
3021 int rx_pkts = 0;
3022 u8 event = 0;
3023 struct tx_cmp *txcmp;
3024
3025 cpr->has_more_work = 0;
3026 cpr->had_work_done = 1;
3027 while (1) {
3028 u8 cmp_type;
3029 int rc;
3030
3031 cons = RING_CMP(raw_cons);
3032 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3033
3034 if (!TX_CMP_VALID(txcmp, raw_cons))
3035 break;
3036
3037 /* The valid test of the entry must be done first before
3038 * reading any further.
3039 */
3040 dma_rmb();
3041 cmp_type = TX_CMP_TYPE(txcmp);
3042 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3043 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3044 u32 opaque = txcmp->tx_cmp_opaque;
3045 struct bnxt_tx_ring_info *txr;
3046 u16 tx_freed;
3047
3048 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3049 event |= BNXT_TX_CMP_EVENT;
3050 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3051 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3052 else
3053 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3054 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3055 bp->tx_ring_mask;
3056 /* return full budget so NAPI will complete. */
3057 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3058 rx_pkts = budget;
3059 raw_cons = NEXT_RAW_CMP(raw_cons);
3060 if (budget)
3061 cpr->has_more_work = 1;
3062 break;
3063 }
3064 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3065 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3066 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3067 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3068 if (likely(budget))
3069 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3070 else
3071 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3072 &event);
3073 if (event & BNXT_REDIRECT_EVENT)
3074 flush_xdp = true;
3075 if (likely(rc >= 0))
3076 rx_pkts += rc;
3077 /* Increment rx_pkts when rc is -ENOMEM to count towards
3078 * the NAPI budget. Otherwise, we may potentially loop
3079 * here forever if we consistently cannot allocate
3080 * buffers.
3081 */
3082 else if (rc == -ENOMEM && budget)
3083 rx_pkts++;
3084 else if (rc == -EBUSY) /* partial completion */
3085 break;
3086 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3087 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3088 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3089 bnxt_hwrm_handler(bp, txcmp);
3090 }
3091 raw_cons = NEXT_RAW_CMP(raw_cons);
3092
3093 if (rx_pkts && rx_pkts == budget) {
3094 cpr->has_more_work = 1;
3095 break;
3096 }
3097 }
3098
3099 if (flush_xdp) {
3100 xdp_do_flush();
3101 event &= ~BNXT_REDIRECT_EVENT;
3102 }
3103
3104 if (event & BNXT_TX_EVENT) {
3105 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3106 u16 prod = txr->tx_prod;
3107
3108 /* Sync BD data before updating doorbell */
3109 wmb();
3110
3111 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3112 event &= ~BNXT_TX_EVENT;
3113 }
3114
3115 cpr->cp_raw_cons = raw_cons;
3116 bnapi->events |= event;
3117 return rx_pkts;
3118 }
3119
3120 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3121 int budget)
3122 {
3123 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3124 bnapi->tx_int(bp, bnapi, budget);
3125
3126 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3127 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3128
3129 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3130 bnapi->events &= ~BNXT_RX_EVENT;
3131 }
3132 if (bnapi->events & BNXT_AGG_EVENT) {
3133 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3134
3135 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3136 bnapi->events &= ~BNXT_AGG_EVENT;
3137 }
3138 }
3139
3140 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3141 int budget)
3142 {
3143 struct bnxt_napi *bnapi = cpr->bnapi;
3144 int rx_pkts;
3145
3146 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3147
3148 /* ACK completion ring before freeing tx ring and producing new
3149 * buffers in rx/agg rings to prevent overflowing the completion
3150 * ring.
3151 */
3152 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3153
3154 __bnxt_poll_work_done(bp, bnapi, budget);
3155 return rx_pkts;
3156 }
3157
3158 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3159 {
3160 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3161 struct bnxt *bp = bnapi->bp;
3162 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3163 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3164 struct tx_cmp *txcmp;
3165 struct rx_cmp_ext *rxcmp1;
3166 u32 cp_cons, tmp_raw_cons;
3167 u32 raw_cons = cpr->cp_raw_cons;
3168 bool flush_xdp = false;
3169 u32 rx_pkts = 0;
3170 u8 event = 0;
3171
3172 while (1) {
3173 int rc;
3174
3175 cp_cons = RING_CMP(raw_cons);
3176 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3177
3178 if (!TX_CMP_VALID(txcmp, raw_cons))
3179 break;
3180
3181 /* The valid test of the entry must be done first before
3182 * reading any further.
3183 */
3184 dma_rmb();
3185 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3186 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3187 cp_cons = RING_CMP(tmp_raw_cons);
3188 rxcmp1 = (struct rx_cmp_ext *)
3189 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3190
3191 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3192 break;
3193
3194 /* force an error to recycle the buffer */
3195 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3196 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3197
3198 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3199 if (likely(rc == -EIO) && budget)
3200 rx_pkts++;
3201 else if (rc == -EBUSY) /* partial completion */
3202 break;
3203 if (event & BNXT_REDIRECT_EVENT)
3204 flush_xdp = true;
3205 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3206 CMPL_BASE_TYPE_HWRM_DONE)) {
3207 bnxt_hwrm_handler(bp, txcmp);
3208 } else {
3209 netdev_err(bp->dev,
3210 "Invalid completion received on special ring\n");
3211 }
3212 raw_cons = NEXT_RAW_CMP(raw_cons);
3213
3214 if (rx_pkts == budget)
3215 break;
3216 }
3217
3218 cpr->cp_raw_cons = raw_cons;
3219 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3220 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3221
3222 if (event & BNXT_AGG_EVENT)
3223 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3224 if (flush_xdp)
3225 xdp_do_flush();
3226
3227 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3228 napi_complete_done(napi, rx_pkts);
3229 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3230 }
3231 return rx_pkts;
3232 }
3233
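/* NAPI poll handler used when the completion ring is consumed directly
 * (non-P5 notification queue mode).  Keep calling bnxt_poll_work() until
 * the budget is exhausted or there is no more work, then complete NAPI
 * and re-arm the completion ring doorbell.  A DIM sample is fed to
 * net_dim() when adaptive interrupt moderation is enabled.
 */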
3234 static int bnxt_poll(struct napi_struct *napi, int budget)
3235 {
3236 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3237 struct bnxt *bp = bnapi->bp;
3238 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3239 int work_done = 0;
3240
3241 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3242 napi_complete(napi);
3243 return 0;
3244 }
3245 while (1) {
3246 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3247
3248 if (work_done >= budget) {
3249 if (!budget)
3250 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3251 break;
3252 }
3253
3254 if (!bnxt_has_work(bp, cpr)) {
3255 if (napi_complete_done(napi, work_done))
3256 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3257 break;
3258 }
3259 }
3260 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3261 struct dim_sample dim_sample = {};
3262
3263 dim_update_sample(cpr->event_ctr,
3264 cpr->rx_packets,
3265 cpr->rx_bytes,
3266 &dim_sample);
3267 net_dim(&cpr->dim, &dim_sample);
3268 }
3269 return work_done;
3270 }
3271
3272 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3273 {
3274 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3275 int i, work_done = 0;
3276
3277 for (i = 0; i < cpr->cp_ring_count; i++) {
3278 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3279
3280 if (cpr2->had_nqe_notify) {
3281 work_done += __bnxt_poll_work(bp, cpr2,
3282 budget - work_done);
3283 cpr->has_more_work |= cpr2->has_more_work;
3284 }
3285 }
3286 return work_done;
3287 }
3288
3289 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3290 u64 dbr_type, int budget)
3291 {
3292 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3293 int i;
3294
3295 for (i = 0; i < cpr->cp_ring_count; i++) {
3296 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3297 struct bnxt_db_info *db;
3298
3299 if (cpr2->had_work_done) {
3300 u32 tgl = 0;
3301
3302 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3303 cpr2->had_nqe_notify = 0;
3304 tgl = cpr2->toggle;
3305 }
3306 db = &cpr2->cp_db;
3307 bnxt_writeq(bp,
3308 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3309 DB_RING_IDX(db, cpr2->cp_raw_cons),
3310 db->doorbell);
3311 cpr2->had_work_done = 0;
3312 }
3313 }
3314 __bnxt_poll_work_done(bp, bnapi, budget);
3315 }
3316
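/* NAPI poll handler for P5 and later chips.  The notification queue (NQ)
 * is walked first; each CQ notification points at a per-ring completion
 * ring (cpr2), which is then polled with the remaining budget.  Once the
 * NQ is empty and no completion ring has more work, the CQs are armed and
 * NAPI is completed.
 */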
3317 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3318 {
3319 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3320 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3321 struct bnxt_cp_ring_info *cpr_rx;
3322 u32 raw_cons = cpr->cp_raw_cons;
3323 struct bnxt *bp = bnapi->bp;
3324 struct nqe_cn *nqcmp;
3325 int work_done = 0;
3326 u32 cons;
3327
3328 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3329 napi_complete(napi);
3330 return 0;
3331 }
3332 if (cpr->has_more_work) {
3333 cpr->has_more_work = 0;
3334 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3335 }
3336 while (1) {
3337 u16 type;
3338
3339 cons = RING_CMP(raw_cons);
3340 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3341
3342 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3343 if (cpr->has_more_work)
3344 break;
3345
3346 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3347 budget);
3348 cpr->cp_raw_cons = raw_cons;
3349 if (napi_complete_done(napi, work_done))
3350 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3351 cpr->cp_raw_cons);
3352 goto poll_done;
3353 }
3354
3355 /* The valid test of the entry must be done first before
3356 * reading any further.
3357 */
3358 dma_rmb();
3359
3360 type = le16_to_cpu(nqcmp->type);
3361 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3362 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3363 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3364 struct bnxt_cp_ring_info *cpr2;
3365
3366 /* No more budget for RX work */
3367 if (budget && work_done >= budget &&
3368 cq_type == BNXT_NQ_HDL_TYPE_RX)
3369 break;
3370
3371 idx = BNXT_NQ_HDL_IDX(idx);
3372 cpr2 = &cpr->cp_ring_arr[idx];
3373 cpr2->had_nqe_notify = 1;
3374 cpr2->toggle = NQE_CN_TOGGLE(type);
3375 work_done += __bnxt_poll_work(bp, cpr2,
3376 budget - work_done);
3377 cpr->has_more_work |= cpr2->has_more_work;
3378 } else {
3379 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3380 }
3381 raw_cons = NEXT_RAW_CMP(raw_cons);
3382 }
3383 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3384 if (raw_cons != cpr->cp_raw_cons) {
3385 cpr->cp_raw_cons = raw_cons;
3386 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3387 }
3388 poll_done:
3389 cpr_rx = &cpr->cp_ring_arr[0];
3390 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3391 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3392 struct dim_sample dim_sample = {};
3393
3394 dim_update_sample(cpr->event_ctr,
3395 cpr_rx->rx_packets,
3396 cpr_rx->rx_bytes,
3397 &dim_sample);
3398 net_dim(&cpr->dim, &dim_sample);
3399 }
3400 return work_done;
3401 }
3402
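/* Walk one TX ring's software buffer array and release everything still
 * outstanding: XDP_REDIRECT frames are unmapped and returned, push-mode
 * skbs are simply freed (they carry no DMA mapping), and normal skbs have
 * the head and every fragment unmapped before the skb is freed.  The BQL
 * queue state is reset at the end.
 */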
3403 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3404 struct bnxt_tx_ring_info *txr, int idx)
3405 {
3406 int i, max_idx;
3407 struct pci_dev *pdev = bp->pdev;
3408
3409 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3410
3411 for (i = 0; i < max_idx;) {
3412 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3413 struct sk_buff *skb;
3414 int j, last;
3415
3416 if (idx < bp->tx_nr_rings_xdp &&
3417 tx_buf->action == XDP_REDIRECT) {
3418 dma_unmap_single(&pdev->dev,
3419 dma_unmap_addr(tx_buf, mapping),
3420 dma_unmap_len(tx_buf, len),
3421 DMA_TO_DEVICE);
3422 xdp_return_frame(tx_buf->xdpf);
3423 tx_buf->action = 0;
3424 tx_buf->xdpf = NULL;
3425 i++;
3426 continue;
3427 }
3428
3429 skb = tx_buf->skb;
3430 if (!skb) {
3431 i++;
3432 continue;
3433 }
3434
3435 tx_buf->skb = NULL;
3436
3437 if (tx_buf->is_push) {
3438 dev_kfree_skb(skb);
3439 i += 2;
3440 continue;
3441 }
3442
3443 dma_unmap_single(&pdev->dev,
3444 dma_unmap_addr(tx_buf, mapping),
3445 skb_headlen(skb),
3446 DMA_TO_DEVICE);
3447
3448 last = tx_buf->nr_frags;
3449 i += 2;
3450 for (j = 0; j < last; j++, i++) {
3451 int ring_idx = i & bp->tx_ring_mask;
3452 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3453
3454 tx_buf = &txr->tx_buf_ring[ring_idx];
3455 netmem_dma_unmap_page_attrs(&pdev->dev,
3456 dma_unmap_addr(tx_buf,
3457 mapping),
3458 skb_frag_size(frag),
3459 DMA_TO_DEVICE, 0);
3460 }
3461 dev_kfree_skb(skb);
3462 }
3463 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3464 }
3465
3466 static void bnxt_free_tx_skbs(struct bnxt *bp)
3467 {
3468 int i;
3469
3470 if (!bp->tx_ring)
3471 return;
3472
3473 for (i = 0; i < bp->tx_nr_rings; i++) {
3474 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3475
3476 if (!txr->tx_buf_ring)
3477 continue;
3478
3479 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3480 }
3481
3482 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3483 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3484 }
3485
3486 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3487 {
3488 int i, max_idx;
3489
3490 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3491
3492 for (i = 0; i < max_idx; i++) {
3493 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3494 void *data = rx_buf->data;
3495
3496 if (!data)
3497 continue;
3498
3499 rx_buf->data = NULL;
3500 if (BNXT_RX_PAGE_MODE(bp))
3501 page_pool_recycle_direct(rxr->page_pool, data);
3502 else
3503 page_pool_free_va(rxr->head_pool, data, true);
3504 }
3505 }
3506
3507 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3508 {
3509 int i, max_idx;
3510
3511 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3512
3513 for (i = 0; i < max_idx; i++) {
3514 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3515 netmem_ref netmem = rx_agg_buf->netmem;
3516
3517 if (!netmem)
3518 continue;
3519
3520 rx_agg_buf->netmem = 0;
3521 __clear_bit(i, rxr->rx_agg_bmap);
3522
3523 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3524 }
3525 }
3526
3527 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3528 struct bnxt_rx_ring_info *rxr)
3529 {
3530 int i;
3531
3532 for (i = 0; i < bp->max_tpa; i++) {
3533 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3534 u8 *data = tpa_info->data;
3535
3536 if (!data)
3537 continue;
3538
3539 tpa_info->data = NULL;
3540 page_pool_free_va(rxr->head_pool, data, false);
3541 }
3542 }
3543
3544 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3545 struct bnxt_rx_ring_info *rxr)
3546 {
3547 struct bnxt_tpa_idx_map *map;
3548
3549 if (!rxr->rx_tpa)
3550 goto skip_rx_tpa_free;
3551
3552 bnxt_free_one_tpa_info_data(bp, rxr);
3553
3554 skip_rx_tpa_free:
3555 if (!rxr->rx_buf_ring)
3556 goto skip_rx_buf_free;
3557
3558 bnxt_free_one_rx_ring(bp, rxr);
3559
3560 skip_rx_buf_free:
3561 if (!rxr->rx_agg_ring)
3562 goto skip_rx_agg_free;
3563
3564 bnxt_free_one_rx_agg_ring(bp, rxr);
3565
3566 skip_rx_agg_free:
3567 map = rxr->rx_tpa_idx_map;
3568 if (map)
3569 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3570 }
3571
3572 static void bnxt_free_rx_skbs(struct bnxt *bp)
3573 {
3574 int i;
3575
3576 if (!bp->rx_ring)
3577 return;
3578
3579 for (i = 0; i < bp->rx_nr_rings; i++)
3580 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3581 }
3582
3583 static void bnxt_free_skbs(struct bnxt *bp)
3584 {
3585 bnxt_free_tx_skbs(bp);
3586 bnxt_free_rx_skbs(bp);
3587 }
3588
3589 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3590 {
3591 u8 init_val = ctxm->init_value;
3592 u16 offset = ctxm->init_offset;
3593 u8 *p2 = p;
3594 int i;
3595
3596 if (!init_val)
3597 return;
3598 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3599 memset(p, init_val, len);
3600 return;
3601 }
3602 for (i = 0; i < len; i += ctxm->entry_size)
3603 *(p2 + i + offset) = init_val;
3604 }
3605
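/* Copy the ring contents between the head and tail offsets into 'buf'
 * (or just compute the length when 'buf' is NULL), handling a region
 * that spans multiple backing pages.  The copy is clamped to the pages
 * available from the starting page onward.
 */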
3606 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3607 void *buf, size_t offset, size_t head,
3608 size_t tail)
3609 {
3610 int i, head_page, start_idx, source_offset;
3611 size_t len, rem_len, total_len, max_bytes;
3612
3613 head_page = head / rmem->page_size;
3614 source_offset = head % rmem->page_size;
3615 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3616 if (!total_len)
3617 total_len = MAX_CTX_BYTES;
3618 start_idx = head_page % MAX_CTX_PAGES;
3619 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3620 source_offset;
3621 total_len = min(total_len, max_bytes);
3622 rem_len = total_len;
3623
3624 for (i = start_idx; rem_len; i++, source_offset = 0) {
3625 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3626 if (buf)
3627 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3628 len);
3629 offset += len;
3630 rem_len -= len;
3631 }
3632 return total_len;
3633 }
3634
3635 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3636 {
3637 struct pci_dev *pdev = bp->pdev;
3638 int i;
3639
3640 if (!rmem->pg_arr)
3641 goto skip_pages;
3642
3643 for (i = 0; i < rmem->nr_pages; i++) {
3644 if (!rmem->pg_arr[i])
3645 continue;
3646
3647 dma_free_coherent(&pdev->dev, rmem->page_size,
3648 rmem->pg_arr[i], rmem->dma_arr[i]);
3649
3650 rmem->pg_arr[i] = NULL;
3651 }
3652 skip_pages:
3653 if (rmem->pg_tbl) {
3654 size_t pg_tbl_size = rmem->nr_pages * 8;
3655
3656 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3657 pg_tbl_size = rmem->page_size;
3658 dma_free_coherent(&pdev->dev, pg_tbl_size,
3659 rmem->pg_tbl, rmem->pg_tbl_map);
3660 rmem->pg_tbl = NULL;
3661 }
3662 if (rmem->vmem_size && *rmem->vmem) {
3663 vfree(*rmem->vmem);
3664 *rmem->vmem = NULL;
3665 }
3666 }
3667
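/* Allocate the DMA-coherent pages backing a ring and, for multi-page or
 * multi-level rings, the page table that points at them.  When the ring
 * PTE flag is set, PTU_PTE_VALID / _NEXT_TO_LAST / _LAST bits are encoded
 * into the page-table entries, and any associated context memory is
 * initialized via bnxt_init_ctx_mem().
 */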
3668 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3669 {
3670 struct pci_dev *pdev = bp->pdev;
3671 u64 valid_bit = 0;
3672 int i;
3673
3674 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3675 valid_bit = PTU_PTE_VALID;
3676 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3677 size_t pg_tbl_size = rmem->nr_pages * 8;
3678
3679 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3680 pg_tbl_size = rmem->page_size;
3681 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3682 &rmem->pg_tbl_map,
3683 GFP_KERNEL);
3684 if (!rmem->pg_tbl)
3685 return -ENOMEM;
3686 }
3687
3688 for (i = 0; i < rmem->nr_pages; i++) {
3689 u64 extra_bits = valid_bit;
3690
3691 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3692 rmem->page_size,
3693 &rmem->dma_arr[i],
3694 GFP_KERNEL);
3695 if (!rmem->pg_arr[i])
3696 return -ENOMEM;
3697
3698 if (rmem->ctx_mem)
3699 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3700 rmem->page_size);
3701 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3702 if (i == rmem->nr_pages - 2 &&
3703 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3704 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3705 else if (i == rmem->nr_pages - 1 &&
3706 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3707 extra_bits |= PTU_PTE_LAST;
3708 rmem->pg_tbl[i] =
3709 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3710 }
3711 }
3712
3713 if (rmem->vmem_size) {
3714 *rmem->vmem = vzalloc(rmem->vmem_size);
3715 if (!(*rmem->vmem))
3716 return -ENOMEM;
3717 }
3718 return 0;
3719 }
3720
3721 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3722 struct bnxt_rx_ring_info *rxr)
3723 {
3724 int i;
3725
3726 kfree(rxr->rx_tpa_idx_map);
3727 rxr->rx_tpa_idx_map = NULL;
3728 if (rxr->rx_tpa) {
3729 for (i = 0; i < bp->max_tpa; i++) {
3730 kfree(rxr->rx_tpa[i].agg_arr);
3731 rxr->rx_tpa[i].agg_arr = NULL;
3732 }
3733 }
3734 kfree(rxr->rx_tpa);
3735 rxr->rx_tpa = NULL;
3736 }
3737
3738 static void bnxt_free_tpa_info(struct bnxt *bp)
3739 {
3740 int i;
3741
3742 for (i = 0; i < bp->rx_nr_rings; i++) {
3743 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3744
3745 bnxt_free_one_tpa_info(bp, rxr);
3746 }
3747 }
3748
3749 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3750 struct bnxt_rx_ring_info *rxr)
3751 {
3752 struct rx_agg_cmp *agg;
3753 int i;
3754
3755 rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3756 if (!rxr->rx_tpa)
3757 return -ENOMEM;
3758
3759 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3760 return 0;
3761 for (i = 0; i < bp->max_tpa; i++) {
3762 agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3763 if (!agg)
3764 return -ENOMEM;
3765 rxr->rx_tpa[i].agg_arr = agg;
3766 }
3767 rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3768 if (!rxr->rx_tpa_idx_map)
3769 return -ENOMEM;
3770
3771 return 0;
3772 }
3773
3774 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3775 {
3776 int i, rc;
3777
3778 bp->max_tpa = MAX_TPA;
3779 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3780 if (!bp->max_tpa_v2)
3781 return 0;
3782 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3783 }
3784
3785 for (i = 0; i < bp->rx_nr_rings; i++) {
3786 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3787
3788 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3789 if (rc)
3790 return rc;
3791 }
3792 return 0;
3793 }
3794
3795 static void bnxt_free_rx_rings(struct bnxt *bp)
3796 {
3797 int i;
3798
3799 if (!bp->rx_ring)
3800 return;
3801
3802 bnxt_free_tpa_info(bp);
3803 for (i = 0; i < bp->rx_nr_rings; i++) {
3804 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3805 struct bnxt_ring_struct *ring;
3806
3807 if (rxr->xdp_prog)
3808 bpf_prog_put(rxr->xdp_prog);
3809
3810 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3811 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3812
3813 page_pool_destroy(rxr->page_pool);
3814 page_pool_destroy(rxr->head_pool);
3815 rxr->page_pool = rxr->head_pool = NULL;
3816
3817 kfree(rxr->rx_agg_bmap);
3818 rxr->rx_agg_bmap = NULL;
3819
3820 ring = &rxr->rx_ring_struct;
3821 bnxt_free_ring(bp, &ring->ring_mem);
3822
3823 ring = &rxr->rx_agg_ring_struct;
3824 bnxt_free_ring(bp, &ring->ring_mem);
3825 }
3826 }
3827
3828 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3829 struct bnxt_rx_ring_info *rxr)
3830 {
3831 /* The user may have chosen a larger than default rx_page_size.
3832 * We keep the ring sizes uniform and also want a uniform number
3833 * of bytes consumed per ring, so cap how much of the rings we fill.
3834 */
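/* Example, derived from the scaling below: if rx_page_size is twice
 * BNXT_RX_PAGE_SIZE, only half of rx_agg_ring_size entries are posted,
 * keeping the bytes handed to the aggregation ring roughly constant.
 */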
3835 int fill_level = bp->rx_agg_ring_size;
3836
3837 if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3838 fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3839
3840 return fill_level;
3841 }
3842
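/* Create the page pool(s) for one RX ring.  The main pool backs the data
 * and aggregation buffers (and may hand out unreadable netmem); a
 * separate, smaller head pool is created only when needed, e.g. for
 * higher-order pages or unreadable memory, otherwise the main pool is
 * reused for headers with an extra reference.
 */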
3843 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3844 struct bnxt_rx_ring_info *rxr,
3845 int numa_node)
3846 {
3847 unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3848 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3849 struct page_pool_params pp = { 0 };
3850 struct page_pool *pool;
3851
3852 pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3853 if (BNXT_RX_PAGE_MODE(bp))
3854 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3855
3856 pp.order = get_order(rxr->rx_page_size);
3857 pp.nid = numa_node;
3858 pp.netdev = bp->dev;
3859 pp.dev = &bp->pdev->dev;
3860 pp.dma_dir = bp->rx_dir;
3861 pp.max_len = PAGE_SIZE << pp.order;
3862 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3863 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3864 pp.queue_idx = rxr->bnapi->index;
3865
3866 pool = page_pool_create(&pp);
3867 if (IS_ERR(pool))
3868 return PTR_ERR(pool);
3869 rxr->page_pool = pool;
3870
3871 rxr->need_head_pool = page_pool_is_unreadable(pool);
3872 rxr->need_head_pool |= !!pp.order;
3873 if (bnxt_separate_head_pool(rxr)) {
3874 pp.order = 0;
3875 pp.max_len = PAGE_SIZE;
3876 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3877 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3878 pool = page_pool_create(&pp);
3879 if (IS_ERR(pool))
3880 goto err_destroy_pp;
3881 } else {
3882 page_pool_get(pool);
3883 }
3884 rxr->head_pool = pool;
3885
3886 return 0;
3887
3888 err_destroy_pp:
3889 page_pool_destroy(rxr->page_pool);
3890 rxr->page_pool = NULL;
3891 return PTR_ERR(pool);
3892 }
3893
3894 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3895 {
3896 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3897 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3898 }
3899
3900 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3901 {
3902 u16 mem_size;
3903
3904 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3905 mem_size = rxr->rx_agg_bmap_size / 8;
3906 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3907 if (!rxr->rx_agg_bmap)
3908 return -ENOMEM;
3909
3910 return 0;
3911 }
3912
3913 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3914 {
3915 int numa_node = dev_to_node(&bp->pdev->dev);
3916 int i, rc = 0, agg_rings = 0, cpu;
3917
3918 if (!bp->rx_ring)
3919 return -ENOMEM;
3920
3921 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3922 agg_rings = 1;
3923
3924 for (i = 0; i < bp->rx_nr_rings; i++) {
3925 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3926 struct bnxt_ring_struct *ring;
3927 int cpu_node;
3928
3929 ring = &rxr->rx_ring_struct;
3930
3931 cpu = cpumask_local_spread(i, numa_node);
3932 cpu_node = cpu_to_node(cpu);
3933 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3934 i, cpu_node);
3935 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3936 if (rc)
3937 return rc;
3938 bnxt_enable_rx_page_pool(rxr);
3939
3940 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3941 if (rc < 0)
3942 return rc;
3943
3944 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3945 MEM_TYPE_PAGE_POOL,
3946 rxr->page_pool);
3947 if (rc) {
3948 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3949 return rc;
3950 }
3951
3952 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3953 if (rc)
3954 return rc;
3955
3956 ring->grp_idx = i;
3957 if (agg_rings) {
3958 ring = &rxr->rx_agg_ring_struct;
3959 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3960 if (rc)
3961 return rc;
3962
3963 ring->grp_idx = i;
3964 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3965 if (rc)
3966 return rc;
3967 }
3968 }
3969 if (bp->flags & BNXT_FLAG_TPA)
3970 rc = bnxt_alloc_tpa_info(bp);
3971 return rc;
3972 }
3973
3974 static void bnxt_free_tx_rings(struct bnxt *bp)
3975 {
3976 int i;
3977 struct pci_dev *pdev = bp->pdev;
3978
3979 if (!bp->tx_ring)
3980 return;
3981
3982 for (i = 0; i < bp->tx_nr_rings; i++) {
3983 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3984 struct bnxt_ring_struct *ring;
3985
3986 if (txr->tx_push) {
3987 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3988 txr->tx_push, txr->tx_push_mapping);
3989 txr->tx_push = NULL;
3990 }
3991
3992 ring = &txr->tx_ring_struct;
3993
3994 bnxt_free_ring(bp, &ring->ring_mem);
3995 }
3996 }
3997
3998 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3999 ((tc) * (bp)->tx_nr_rings_per_tc)
4000
4001 #define BNXT_RING_TO_TC_OFF(bp, tx) \
4002 ((tx) % (bp)->tx_nr_rings_per_tc)
4003
4004 #define BNXT_RING_TO_TC(bp, tx) \
4005 ((tx) / (bp)->tx_nr_rings_per_tc)
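/* Example: with tx_nr_rings_per_tc == 4, TX ring 5 maps to TC 1
 * (BNXT_RING_TO_TC) at offset 1 within that TC (BNXT_RING_TO_TC_OFF),
 * and TC 1's rings start at ring index 4 (BNXT_TC_TO_RING_BASE).
 */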
4006
4007 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4008 {
4009 int i, j, rc;
4010 struct pci_dev *pdev = bp->pdev;
4011
4012 bp->tx_push_size = 0;
4013 if (bp->tx_push_thresh) {
4014 int push_size;
4015
4016 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4017 bp->tx_push_thresh);
4018
4019 if (push_size > 256) {
4020 push_size = 0;
4021 bp->tx_push_thresh = 0;
4022 }
4023
4024 bp->tx_push_size = push_size;
4025 }
4026
4027 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4028 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4029 struct bnxt_ring_struct *ring;
4030 u8 qidx;
4031
4032 ring = &txr->tx_ring_struct;
4033
4034 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4035 if (rc)
4036 return rc;
4037
4038 ring->grp_idx = txr->bnapi->index;
4039 if (bp->tx_push_size) {
4040 dma_addr_t mapping;
4041
4042 /* One pre-allocated DMA buffer to back up the
4043 * TX push operation
4044 */
4045 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4046 bp->tx_push_size,
4047 &txr->tx_push_mapping,
4048 GFP_KERNEL);
4049
4050 if (!txr->tx_push)
4051 return -ENOMEM;
4052
4053 mapping = txr->tx_push_mapping +
4054 sizeof(struct tx_push_bd);
4055 txr->data_mapping = cpu_to_le64(mapping);
4056 }
4057 qidx = bp->tc_to_qidx[j];
4058 ring->queue_id = bp->q_info[qidx].queue_id;
4059 spin_lock_init(&txr->xdp_tx_lock);
4060 if (i < bp->tx_nr_rings_xdp)
4061 continue;
4062 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4063 j++;
4064 }
4065 return 0;
4066 }
4067
4068 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4069 {
4070 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4071
4072 kfree(cpr->cp_desc_ring);
4073 cpr->cp_desc_ring = NULL;
4074 ring->ring_mem.pg_arr = NULL;
4075 kfree(cpr->cp_desc_mapping);
4076 cpr->cp_desc_mapping = NULL;
4077 ring->ring_mem.dma_arr = NULL;
4078 }
4079
4080 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4081 {
4082 cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4083 if (!cpr->cp_desc_ring)
4084 return -ENOMEM;
4085 cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4086 if (!cpr->cp_desc_mapping)
4087 return -ENOMEM;
4088 return 0;
4089 }
4090
4091 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4092 {
4093 int i;
4094
4095 if (!bp->bnapi)
4096 return;
4097 for (i = 0; i < bp->cp_nr_rings; i++) {
4098 struct bnxt_napi *bnapi = bp->bnapi[i];
4099
4100 if (!bnapi)
4101 continue;
4102 bnxt_free_cp_arrays(&bnapi->cp_ring);
4103 }
4104 }
4105
4106 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4107 {
4108 int i, n = bp->cp_nr_pages;
4109
4110 for (i = 0; i < bp->cp_nr_rings; i++) {
4111 struct bnxt_napi *bnapi = bp->bnapi[i];
4112 int rc;
4113
4114 if (!bnapi)
4115 continue;
4116 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4117 if (rc)
4118 return rc;
4119 }
4120 return 0;
4121 }
4122
4123 static void bnxt_free_cp_rings(struct bnxt *bp)
4124 {
4125 int i;
4126
4127 if (!bp->bnapi)
4128 return;
4129
4130 for (i = 0; i < bp->cp_nr_rings; i++) {
4131 struct bnxt_napi *bnapi = bp->bnapi[i];
4132 struct bnxt_cp_ring_info *cpr;
4133 struct bnxt_ring_struct *ring;
4134 int j;
4135
4136 if (!bnapi)
4137 continue;
4138
4139 cpr = &bnapi->cp_ring;
4140 ring = &cpr->cp_ring_struct;
4141
4142 bnxt_free_ring(bp, &ring->ring_mem);
4143
4144 if (!cpr->cp_ring_arr)
4145 continue;
4146
4147 for (j = 0; j < cpr->cp_ring_count; j++) {
4148 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4149
4150 ring = &cpr2->cp_ring_struct;
4151 bnxt_free_ring(bp, &ring->ring_mem);
4152 bnxt_free_cp_arrays(cpr2);
4153 }
4154 kfree(cpr->cp_ring_arr);
4155 cpr->cp_ring_arr = NULL;
4156 cpr->cp_ring_count = 0;
4157 }
4158 }
4159
4160 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4161 struct bnxt_cp_ring_info *cpr)
4162 {
4163 struct bnxt_ring_mem_info *rmem;
4164 struct bnxt_ring_struct *ring;
4165 int rc;
4166
4167 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4168 if (rc) {
4169 bnxt_free_cp_arrays(cpr);
4170 return -ENOMEM;
4171 }
4172 ring = &cpr->cp_ring_struct;
4173 rmem = &ring->ring_mem;
4174 rmem->nr_pages = bp->cp_nr_pages;
4175 rmem->page_size = HW_CMPD_RING_SIZE;
4176 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4177 rmem->dma_arr = cpr->cp_desc_mapping;
4178 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4179 rc = bnxt_alloc_ring(bp, rmem);
4180 if (rc) {
4181 bnxt_free_ring(bp, rmem);
4182 bnxt_free_cp_arrays(cpr);
4183 }
4184 return rc;
4185 }
4186
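/* Allocate the completion ring(s) for every NAPI instance.  On P5+ chips
 * each bnapi additionally gets an array of sub completion rings hanging
 * off the NQ: one for RX if this index has an RX ring, plus one for an
 * XDP TX ring or one per TC for regular TX, each wired to the matching
 * rx_ring/tx_ring via rx_cpr/tx_cpr.
 */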
4187 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4188 {
4189 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4190 int i, j, rc, ulp_msix;
4191 int tcs = bp->num_tc;
4192
4193 if (!tcs)
4194 tcs = 1;
4195 ulp_msix = bnxt_get_ulp_msix_num(bp);
4196 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4197 struct bnxt_napi *bnapi = bp->bnapi[i];
4198 struct bnxt_cp_ring_info *cpr, *cpr2;
4199 struct bnxt_ring_struct *ring;
4200 int cp_count = 0, k;
4201 int rx = 0, tx = 0;
4202
4203 if (!bnapi)
4204 continue;
4205
4206 cpr = &bnapi->cp_ring;
4207 cpr->bnapi = bnapi;
4208 ring = &cpr->cp_ring_struct;
4209
4210 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4211 if (rc)
4212 return rc;
4213
4214 ring->map_idx = ulp_msix + i;
4215
4216 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4217 continue;
4218
4219 if (i < bp->rx_nr_rings) {
4220 cp_count++;
4221 rx = 1;
4222 }
4223 if (i < bp->tx_nr_rings_xdp) {
4224 cp_count++;
4225 tx = 1;
4226 } else if ((sh && i < bp->tx_nr_rings) ||
4227 (!sh && i >= bp->rx_nr_rings)) {
4228 cp_count += tcs;
4229 tx = 1;
4230 }
4231
4232 cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4233 if (!cpr->cp_ring_arr)
4234 return -ENOMEM;
4235 cpr->cp_ring_count = cp_count;
4236
4237 for (k = 0; k < cp_count; k++) {
4238 cpr2 = &cpr->cp_ring_arr[k];
4239 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4240 if (rc)
4241 return rc;
4242 cpr2->bnapi = bnapi;
4243 cpr2->sw_stats = cpr->sw_stats;
4244 cpr2->cp_idx = k;
4245 if (!k && rx) {
4246 bp->rx_ring[i].rx_cpr = cpr2;
4247 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4248 } else {
4249 int n, tc = k - rx;
4250
4251 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4252 bp->tx_ring[n].tx_cpr = cpr2;
4253 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4254 }
4255 }
4256 if (tx)
4257 j++;
4258 }
4259 return 0;
4260 }
4261
4262 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4263 struct bnxt_rx_ring_info *rxr)
4264 {
4265 struct bnxt_ring_mem_info *rmem;
4266 struct bnxt_ring_struct *ring;
4267
4268 ring = &rxr->rx_ring_struct;
4269 rmem = &ring->ring_mem;
4270 rmem->nr_pages = bp->rx_nr_pages;
4271 rmem->page_size = HW_RXBD_RING_SIZE;
4272 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4273 rmem->dma_arr = rxr->rx_desc_mapping;
4274 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4275 rmem->vmem = (void **)&rxr->rx_buf_ring;
4276
4277 ring = &rxr->rx_agg_ring_struct;
4278 rmem = &ring->ring_mem;
4279 rmem->nr_pages = bp->rx_agg_nr_pages;
4280 rmem->page_size = HW_RXBD_RING_SIZE;
4281 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4282 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4283 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4284 rmem->vmem = (void **)&rxr->rx_agg_ring;
4285 }
4286
4287 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4288 struct bnxt_rx_ring_info *rxr)
4289 {
4290 struct bnxt_ring_mem_info *rmem;
4291 struct bnxt_ring_struct *ring;
4292 int i;
4293
4294 rxr->page_pool->p.napi = NULL;
4295 rxr->page_pool = NULL;
4296 rxr->head_pool->p.napi = NULL;
4297 rxr->head_pool = NULL;
4298 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4299
4300 ring = &rxr->rx_ring_struct;
4301 rmem = &ring->ring_mem;
4302 rmem->pg_tbl = NULL;
4303 rmem->pg_tbl_map = 0;
4304 for (i = 0; i < rmem->nr_pages; i++) {
4305 rmem->pg_arr[i] = NULL;
4306 rmem->dma_arr[i] = 0;
4307 }
4308 *rmem->vmem = NULL;
4309
4310 ring = &rxr->rx_agg_ring_struct;
4311 rmem = &ring->ring_mem;
4312 rmem->pg_tbl = NULL;
4313 rmem->pg_tbl_map = 0;
4314 for (i = 0; i < rmem->nr_pages; i++) {
4315 rmem->pg_arr[i] = NULL;
4316 rmem->dma_arr[i] = 0;
4317 }
4318 *rmem->vmem = NULL;
4319 }
4320
4321 static void bnxt_init_ring_struct(struct bnxt *bp)
4322 {
4323 int i, j;
4324
4325 for (i = 0; i < bp->cp_nr_rings; i++) {
4326 struct bnxt_napi *bnapi = bp->bnapi[i];
4327 struct netdev_queue_config qcfg;
4328 struct bnxt_ring_mem_info *rmem;
4329 struct bnxt_cp_ring_info *cpr;
4330 struct bnxt_rx_ring_info *rxr;
4331 struct bnxt_tx_ring_info *txr;
4332 struct bnxt_ring_struct *ring;
4333
4334 if (!bnapi)
4335 continue;
4336
4337 cpr = &bnapi->cp_ring;
4338 ring = &cpr->cp_ring_struct;
4339 rmem = &ring->ring_mem;
4340 rmem->nr_pages = bp->cp_nr_pages;
4341 rmem->page_size = HW_CMPD_RING_SIZE;
4342 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4343 rmem->dma_arr = cpr->cp_desc_mapping;
4344 rmem->vmem_size = 0;
4345
4346 rxr = bnapi->rx_ring;
4347 if (!rxr)
4348 goto skip_rx;
4349
4350 netdev_queue_config(bp->dev, i, &qcfg);
4351 rxr->rx_page_size = qcfg.rx_page_size;
4352
4353 ring = &rxr->rx_ring_struct;
4354 rmem = &ring->ring_mem;
4355 rmem->nr_pages = bp->rx_nr_pages;
4356 rmem->page_size = HW_RXBD_RING_SIZE;
4357 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4358 rmem->dma_arr = rxr->rx_desc_mapping;
4359 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4360 rmem->vmem = (void **)&rxr->rx_buf_ring;
4361
4362 ring = &rxr->rx_agg_ring_struct;
4363 rmem = &ring->ring_mem;
4364 rmem->nr_pages = bp->rx_agg_nr_pages;
4365 rmem->page_size = HW_RXBD_RING_SIZE;
4366 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4367 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4368 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4369 rmem->vmem = (void **)&rxr->rx_agg_ring;
4370
4371 skip_rx:
4372 bnxt_for_each_napi_tx(j, bnapi, txr) {
4373 ring = &txr->tx_ring_struct;
4374 rmem = &ring->ring_mem;
4375 rmem->nr_pages = bp->tx_nr_pages;
4376 rmem->page_size = HW_TXBD_RING_SIZE;
4377 rmem->pg_arr = (void **)txr->tx_desc_ring;
4378 rmem->dma_arr = txr->tx_desc_mapping;
4379 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4380 rmem->vmem = (void **)&txr->tx_buf_ring;
4381 }
4382 }
4383 }
4384
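/* Write the static part of every RX buffer descriptor in the ring: the
 * length/flags/type word and an opaque field recording the producer
 * index, which is used later to locate the software ring entry for a
 * completion.
 */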
4385 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4386 {
4387 int i;
4388 u32 prod;
4389 struct rx_bd **rx_buf_ring;
4390
4391 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4392 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4393 int j;
4394 struct rx_bd *rxbd;
4395
4396 rxbd = rx_buf_ring[i];
4397 if (!rxbd)
4398 continue;
4399
4400 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4401 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4402 rxbd->rx_bd_opaque = prod;
4403 }
4404 }
4405 }
4406
4407 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4408 struct bnxt_rx_ring_info *rxr,
4409 int ring_nr)
4410 {
4411 u32 prod;
4412 int i;
4413
4414 prod = rxr->rx_prod;
4415 for (i = 0; i < bp->rx_ring_size; i++) {
4416 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4417 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4418 ring_nr, i, bp->rx_ring_size);
4419 break;
4420 }
4421 prod = NEXT_RX(prod);
4422 }
4423 rxr->rx_prod = prod;
4424 }
4425
4426 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4427 struct bnxt_rx_ring_info *rxr,
4428 int ring_nr)
4429 {
4430 int fill_level, i;
4431 u32 prod;
4432
4433 fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4434
4435 prod = rxr->rx_agg_prod;
4436 for (i = 0; i < fill_level; i++) {
4437 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4438 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4439 ring_nr, i, bp->rx_agg_ring_size);
4440 break;
4441 }
4442 prod = NEXT_RX_AGG(prod);
4443 }
4444 rxr->rx_agg_prod = prod;
4445 }
4446
4447 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4448 struct bnxt_rx_ring_info *rxr)
4449 {
4450 dma_addr_t mapping;
4451 u8 *data;
4452 int i;
4453
4454 for (i = 0; i < bp->max_tpa; i++) {
4455 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4456 GFP_KERNEL);
4457 if (!data)
4458 return -ENOMEM;
4459
4460 rxr->rx_tpa[i].data = data;
4461 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4462 rxr->rx_tpa[i].mapping = mapping;
4463 }
4464
4465 return 0;
4466 }
4467
4468 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4469 {
4470 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4471 int rc;
4472
4473 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4474
4475 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4476 return 0;
4477
4478 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4479
4480 if (rxr->rx_tpa) {
4481 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4482 if (rc)
4483 return rc;
4484 }
4485 return 0;
4486 }
4487
4488 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4489 struct bnxt_rx_ring_info *rxr)
4490 {
4491 struct bnxt_ring_struct *ring;
4492 u32 type;
4493
4494 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4495 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4496
4497 if (NET_IP_ALIGN == 2)
4498 type |= RX_BD_FLAGS_SOP;
4499
4500 ring = &rxr->rx_ring_struct;
4501 bnxt_init_rxbd_pages(ring, type);
4502 ring->fw_ring_id = INVALID_HW_RING_ID;
4503 }
4504
4505 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4506 struct bnxt_rx_ring_info *rxr)
4507 {
4508 struct bnxt_ring_struct *ring;
4509 u32 type;
4510
4511 ring = &rxr->rx_agg_ring_struct;
4512 ring->fw_ring_id = INVALID_HW_RING_ID;
4513 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4514 type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4515 RX_BD_TYPE_RX_AGG_BD;
4516
4517 /* On P7, setting EOP will cause the chip to disable
4518 * Relaxed Ordering (RO) for TPA data. Disable EOP for
4519 * potentially higher performance with RO.
4520 */
4521 if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4522 type |= RX_BD_FLAGS_AGG_EOP;
4523
4524 bnxt_init_rxbd_pages(ring, type);
4525 }
4526 }
4527
4528 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4529 {
4530 struct bnxt_rx_ring_info *rxr;
4531
4532 rxr = &bp->rx_ring[ring_nr];
4533 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4534
4535 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4536 &rxr->bnapi->napi);
4537
4538 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4539 bpf_prog_add(bp->xdp_prog, 1);
4540 rxr->xdp_prog = bp->xdp_prog;
4541 }
4542
4543 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4544
4545 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4546 }
4547
4548 static void bnxt_init_cp_rings(struct bnxt *bp)
4549 {
4550 int i, j;
4551
4552 for (i = 0; i < bp->cp_nr_rings; i++) {
4553 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4554 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4555
4556 ring->fw_ring_id = INVALID_HW_RING_ID;
4557 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4558 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4559 if (!cpr->cp_ring_arr)
4560 continue;
4561 for (j = 0; j < cpr->cp_ring_count; j++) {
4562 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4563
4564 ring = &cpr2->cp_ring_struct;
4565 ring->fw_ring_id = INVALID_HW_RING_ID;
4566 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4567 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4568 }
4569 }
4570 }
4571
4572 static int bnxt_init_rx_rings(struct bnxt *bp)
4573 {
4574 int i, rc = 0;
4575
4576 if (BNXT_RX_PAGE_MODE(bp)) {
4577 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4578 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4579 } else {
4580 bp->rx_offset = BNXT_RX_OFFSET;
4581 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4582 }
4583
4584 for (i = 0; i < bp->rx_nr_rings; i++) {
4585 rc = bnxt_init_one_rx_ring(bp, i);
4586 if (rc)
4587 break;
4588 }
4589
4590 return rc;
4591 }
4592
4593 static int bnxt_init_tx_rings(struct bnxt *bp)
4594 {
4595 u16 i;
4596
4597 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4598 BNXT_MIN_TX_DESC_CNT);
4599
4600 for (i = 0; i < bp->tx_nr_rings; i++) {
4601 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4602 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4603
4604 ring->fw_ring_id = INVALID_HW_RING_ID;
4605
4606 if (i >= bp->tx_nr_rings_xdp)
4607 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4608 NETDEV_QUEUE_TYPE_TX,
4609 &txr->bnapi->napi);
4610 }
4611
4612 return 0;
4613 }
4614
4615 static void bnxt_free_ring_grps(struct bnxt *bp)
4616 {
4617 kfree(bp->grp_info);
4618 bp->grp_info = NULL;
4619 }
4620
4621 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4622 {
4623 int i;
4624
4625 if (irq_re_init) {
4626 bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4627 bp->cp_nr_rings);
4628 if (!bp->grp_info)
4629 return -ENOMEM;
4630 }
4631 for (i = 0; i < bp->cp_nr_rings; i++) {
4632 if (irq_re_init)
4633 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4634 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4635 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4636 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4637 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4638 }
4639 return 0;
4640 }
4641
4642 static void bnxt_free_vnics(struct bnxt *bp)
4643 {
4644 kfree(bp->vnic_info);
4645 bp->vnic_info = NULL;
4646 bp->nr_vnics = 0;
4647 }
4648
4649 static int bnxt_alloc_vnics(struct bnxt *bp)
4650 {
4651 int num_vnics = 1;
4652
4653 #ifdef CONFIG_RFS_ACCEL
4654 if (bp->flags & BNXT_FLAG_RFS) {
4655 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4656 num_vnics++;
4657 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4658 num_vnics += bp->rx_nr_rings;
4659 }
4660 #endif
4661
4662 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4663 num_vnics++;
4664
4665 bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4666 if (!bp->vnic_info)
4667 return -ENOMEM;
4668
4669 bp->nr_vnics = num_vnics;
4670 return 0;
4671 }
4672
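/* Initialize the software VNIC table: mark all firmware IDs invalid and
 * set up the RSS hash keys.  The default VNIC gets a random hash key
 * (generated once and then reused), the first 8 bytes of which are cached
 * as bp->toeplitz_prefix; all other VNICs copy the default VNIC's key.
 */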
4673 static void bnxt_init_vnics(struct bnxt *bp)
4674 {
4675 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4676 int i;
4677
4678 for (i = 0; i < bp->nr_vnics; i++) {
4679 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4680 int j;
4681
4682 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4683 vnic->vnic_id = i;
4684 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4685 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4686
4687 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4688
4689 if (bp->vnic_info[i].rss_hash_key) {
4690 if (i == BNXT_VNIC_DEFAULT) {
4691 u8 *key = (void *)vnic->rss_hash_key;
4692 int k;
4693
4694 if (!bp->rss_hash_key_valid &&
4695 !bp->rss_hash_key_updated) {
4696 get_random_bytes(bp->rss_hash_key,
4697 HW_HASH_KEY_SIZE);
4698 bp->rss_hash_key_updated = true;
4699 }
4700
4701 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4702 HW_HASH_KEY_SIZE);
4703
4704 if (!bp->rss_hash_key_updated)
4705 continue;
4706
4707 bp->rss_hash_key_updated = false;
4708 bp->rss_hash_key_valid = true;
4709
4710 bp->toeplitz_prefix = 0;
4711 for (k = 0; k < 8; k++) {
4712 bp->toeplitz_prefix <<= 8;
4713 bp->toeplitz_prefix |= key[k];
4714 }
4715 } else {
4716 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4717 HW_HASH_KEY_SIZE);
4718 }
4719 }
4720 }
4721 }
4722
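/* Convert a ring size in descriptors into a number of backing pages,
 * rounded up to a power of two and always with at least one spare slot,
 * e.g. 1536 descriptors at 512 per page yields 4 pages rather than 3.
 */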
4723 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4724 {
4725 int pages;
4726
4727 pages = ring_size / desc_per_pg;
4728
4729 if (!pages)
4730 return 1;
4731
4732 pages++;
4733
4734 while (pages & (pages - 1))
4735 pages++;
4736
4737 return pages;
4738 }
4739
4740 void bnxt_set_tpa_flags(struct bnxt *bp)
4741 {
4742 bp->flags &= ~BNXT_FLAG_TPA;
4743 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4744 return;
4745 if (bp->dev->features & NETIF_F_LRO)
4746 bp->flags |= BNXT_FLAG_LRO;
4747 else if (bp->dev->features & NETIF_F_GRO_HW)
4748 bp->flags |= BNXT_FLAG_GRO;
4749 }
4750
4751 static void bnxt_init_ring_params(struct bnxt *bp)
4752 {
4753 unsigned int rx_size;
4754
4755 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4756 /* Try to fit 4 chunks into a 4k page */
4757 rx_size = SZ_1K -
4758 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4759 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4760 }
4761
4762 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4763 * be set on entry.
4764 */
4765 void bnxt_set_ring_params(struct bnxt *bp)
4766 {
4767 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4768 u32 agg_factor = 0, agg_ring_size = 0;
4769
4770 /* 8 for CRC and VLAN */
4771 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4772
4773 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4774 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4775
4776 ring_size = bp->rx_ring_size;
4777 bp->rx_agg_ring_size = 0;
4778 bp->rx_agg_nr_pages = 0;
4779
4780 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4781 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4782
4783 bp->flags &= ~BNXT_FLAG_JUMBO;
4784 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4785 u32 jumbo_factor;
4786
4787 bp->flags |= BNXT_FLAG_JUMBO;
4788 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4789 if (jumbo_factor > agg_factor)
4790 agg_factor = jumbo_factor;
4791 }
4792 if (agg_factor) {
4793 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4794 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4795 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4796 bp->rx_ring_size, ring_size);
4797 bp->rx_ring_size = ring_size;
4798 }
4799 agg_ring_size = ring_size * agg_factor;
4800
4801 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4802 RX_DESC_CNT);
4803 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4804 u32 tmp = agg_ring_size;
4805
4806 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4807 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4808 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4809 tmp, agg_ring_size);
4810 }
4811 bp->rx_agg_ring_size = agg_ring_size;
4812 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4813
4814 if (BNXT_RX_PAGE_MODE(bp)) {
4815 rx_space = PAGE_SIZE;
4816 rx_size = PAGE_SIZE -
4817 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4818 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4819 } else {
4820 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4821 bp->rx_copybreak,
4822 bp->dev->cfg_pending->hds_thresh);
4823 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4824 rx_space = rx_size + NET_SKB_PAD +
4825 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4826 }
4827 }
4828
4829 bp->rx_buf_use_size = rx_size;
4830 bp->rx_buf_size = rx_space;
4831
4832 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4833 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4834
4835 ring_size = bp->tx_ring_size;
4836 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4837 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4838
4839 max_rx_cmpl = bp->rx_ring_size;
4840 /* MAX TPA needs to be added because TPA_START completions are
4841 * immediately recycled, so the TPA completions are not bound by
4842 * the RX ring size.
4843 */
4844 if (bp->flags & BNXT_FLAG_TPA)
4845 max_rx_cmpl += bp->max_tpa;
4846 /* RX and TPA completions are 32-byte, all others are 16-byte */
4847 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4848 bp->cp_ring_size = ring_size;
4849
4850 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4851 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4852 bp->cp_nr_pages = MAX_CP_PAGES;
4853 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4854 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4855 ring_size, bp->cp_ring_size);
4856 }
4857 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4858 bp->cp_ring_mask = bp->cp_bit - 1;
4859 }
4860
4861 /* Changing allocation mode of RX rings.
4862 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4863 */
4864 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4865 {
4866 struct net_device *dev = bp->dev;
4867
4868 if (page_mode) {
4869 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4870 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4871
4872 if (bp->xdp_prog->aux->xdp_has_frags)
4873 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4874 else
4875 dev->max_mtu =
4876 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4877 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4878 bp->flags |= BNXT_FLAG_JUMBO;
4879 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4880 } else {
4881 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4882 bp->rx_skb_func = bnxt_rx_page_skb;
4883 }
4884 bp->rx_dir = DMA_BIDIRECTIONAL;
4885 } else {
4886 dev->max_mtu = bp->max_mtu;
4887 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4888 bp->rx_dir = DMA_FROM_DEVICE;
4889 bp->rx_skb_func = bnxt_rx_skb;
4890 }
4891 }
4892
4893 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4894 {
4895 __bnxt_set_rx_skb_mode(bp, page_mode);
4896
4897 if (!page_mode) {
4898 int rx, tx;
4899
4900 bnxt_get_max_rings(bp, &rx, &tx, true);
4901 if (rx > 1) {
4902 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4903 bp->dev->hw_features |= NETIF_F_LRO;
4904 }
4905 }
4906
4907 /* Update LRO and GRO_HW availability */
4908 netdev_update_features(bp->dev);
4909 }
4910
4911 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4912 {
4913 int i;
4914 struct bnxt_vnic_info *vnic;
4915 struct pci_dev *pdev = bp->pdev;
4916
4917 if (!bp->vnic_info)
4918 return;
4919
4920 for (i = 0; i < bp->nr_vnics; i++) {
4921 vnic = &bp->vnic_info[i];
4922
4923 kfree(vnic->fw_grp_ids);
4924 vnic->fw_grp_ids = NULL;
4925
4926 kfree(vnic->uc_list);
4927 vnic->uc_list = NULL;
4928
4929 if (vnic->mc_list) {
4930 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4931 vnic->mc_list, vnic->mc_list_mapping);
4932 vnic->mc_list = NULL;
4933 }
4934
4935 if (vnic->rss_table) {
4936 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4937 vnic->rss_table,
4938 vnic->rss_table_dma_addr);
4939 vnic->rss_table = NULL;
4940 }
4941
4942 vnic->rss_hash_key = NULL;
4943 vnic->flags = 0;
4944 }
4945 }
4946
4947 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4948 {
4949 int i, rc = 0, size;
4950 struct bnxt_vnic_info *vnic;
4951 struct pci_dev *pdev = bp->pdev;
4952 int max_rings;
4953
4954 for (i = 0; i < bp->nr_vnics; i++) {
4955 vnic = &bp->vnic_info[i];
4956
4957 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4958 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4959
4960 if (mem_size > 0) {
4961 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4962 if (!vnic->uc_list) {
4963 rc = -ENOMEM;
4964 goto out;
4965 }
4966 }
4967 }
4968
4969 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4970 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4971 vnic->mc_list =
4972 dma_alloc_coherent(&pdev->dev,
4973 vnic->mc_list_size,
4974 &vnic->mc_list_mapping,
4975 GFP_KERNEL);
4976 if (!vnic->mc_list) {
4977 rc = -ENOMEM;
4978 goto out;
4979 }
4980 }
4981
4982 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4983 goto vnic_skip_grps;
4984
4985 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4986 max_rings = bp->rx_nr_rings;
4987 else
4988 max_rings = 1;
4989
4990 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4991 if (!vnic->fw_grp_ids) {
4992 rc = -ENOMEM;
4993 goto out;
4994 }
4995 vnic_skip_grps:
4996 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4997 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4998 continue;
4999
5000 /* Allocate rss table and hash key */
5001 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5002 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5003 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5004
5005 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5006 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5007 vnic->rss_table_size,
5008 &vnic->rss_table_dma_addr,
5009 GFP_KERNEL);
5010 if (!vnic->rss_table) {
5011 rc = -ENOMEM;
5012 goto out;
5013 }
5014
5015 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5016 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5017 }
5018 return 0;
5019
5020 out:
5021 return rc;
5022 }
5023
5024 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5025 {
5026 struct bnxt_hwrm_wait_token *token;
5027
5028 dma_pool_destroy(bp->hwrm_dma_pool);
5029 bp->hwrm_dma_pool = NULL;
5030
5031 rcu_read_lock();
5032 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5033 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5034 rcu_read_unlock();
5035 }
5036
5037 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5038 {
5039 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5040 BNXT_HWRM_DMA_SIZE,
5041 BNXT_HWRM_DMA_ALIGN, 0);
5042 if (!bp->hwrm_dma_pool)
5043 return -ENOMEM;
5044
5045 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5046
5047 return 0;
5048 }
5049
5050 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5051 {
5052 kfree(stats->hw_masks);
5053 stats->hw_masks = NULL;
5054 kfree(stats->sw_stats);
5055 stats->sw_stats = NULL;
5056 if (stats->hw_stats) {
5057 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5058 stats->hw_stats_map);
5059 stats->hw_stats = NULL;
5060 }
5061 }
5062
5063 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5064 bool alloc_masks)
5065 {
5066 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5067 &stats->hw_stats_map, GFP_KERNEL);
5068 if (!stats->hw_stats)
5069 return -ENOMEM;
5070
5071 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5072 if (!stats->sw_stats)
5073 goto stats_mem_err;
5074
5075 if (alloc_masks) {
5076 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5077 if (!stats->hw_masks)
5078 goto stats_mem_err;
5079 }
5080 return 0;
5081
5082 stats_mem_err:
5083 bnxt_free_stats_mem(bp, stats);
5084 return -ENOMEM;
5085 }
5086
5087 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5088 {
5089 int i;
5090
5091 for (i = 0; i < count; i++)
5092 mask_arr[i] = mask;
5093 }
5094
5095 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5096 {
5097 int i;
5098
5099 for (i = 0; i < count; i++)
5100 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5101 }
5102
5103 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5104 struct bnxt_stats_mem *stats)
5105 {
5106 struct hwrm_func_qstats_ext_output *resp;
5107 struct hwrm_func_qstats_ext_input *req;
5108 __le64 *hw_masks;
5109 int rc;
5110
5111 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5112 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5113 return -EOPNOTSUPP;
5114
5115 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5116 if (rc)
5117 return rc;
5118
5119 req->fid = cpu_to_le16(0xffff);
5120 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5121
5122 resp = hwrm_req_hold(bp, req);
5123 rc = hwrm_req_send(bp, req);
5124 if (!rc) {
5125 hw_masks = &resp->rx_ucast_pkts;
5126 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5127 }
5128 hwrm_req_drop(bp, req);
5129 return rc;
5130 }
5131
5132 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5133 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5134
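/* Query the counter-mask widths from firmware where supported; if the
 * query fails, fall back to assuming 48-bit ring counters on P5+ (64-bit
 * otherwise) and 40-bit port counters.  The masks are used when
 * accumulating counters so that wrap-around can be handled.
 */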
5135 static void bnxt_init_stats(struct bnxt *bp)
5136 {
5137 struct bnxt_napi *bnapi = bp->bnapi[0];
5138 struct bnxt_cp_ring_info *cpr;
5139 struct bnxt_stats_mem *stats;
5140 __le64 *rx_stats, *tx_stats;
5141 int rc, rx_count, tx_count;
5142 u64 *rx_masks, *tx_masks;
5143 u64 mask;
5144 u8 flags;
5145
5146 cpr = &bnapi->cp_ring;
5147 stats = &cpr->stats;
5148 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5149 if (rc) {
5150 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5151 mask = (1ULL << 48) - 1;
5152 else
5153 mask = -1ULL;
5154 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5155 }
5156 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5157 stats = &bp->port_stats;
5158 rx_stats = stats->hw_stats;
5159 rx_masks = stats->hw_masks;
5160 rx_count = sizeof(struct rx_port_stats) / 8;
5161 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5162 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5163 tx_count = sizeof(struct tx_port_stats) / 8;
5164
5165 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5166 rc = bnxt_hwrm_port_qstats(bp, flags);
5167 if (rc) {
5168 mask = (1ULL << 40) - 1;
5169
5170 bnxt_fill_masks(rx_masks, mask, rx_count);
5171 bnxt_fill_masks(tx_masks, mask, tx_count);
5172 } else {
5173 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5174 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5175 bnxt_hwrm_port_qstats(bp, 0);
5176 }
5177 }
5178 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5179 stats = &bp->rx_port_stats_ext;
5180 rx_stats = stats->hw_stats;
5181 rx_masks = stats->hw_masks;
5182 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5183 stats = &bp->tx_port_stats_ext;
5184 tx_stats = stats->hw_stats;
5185 tx_masks = stats->hw_masks;
5186 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5187
5188 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5189 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5190 if (rc) {
5191 mask = (1ULL << 40) - 1;
5192
5193 bnxt_fill_masks(rx_masks, mask, rx_count);
5194 if (tx_stats)
5195 bnxt_fill_masks(tx_masks, mask, tx_count);
5196 } else {
5197 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5198 if (tx_stats)
5199 bnxt_copy_hw_masks(tx_masks, tx_stats,
5200 tx_count);
5201 bnxt_hwrm_port_qstats_ext(bp, 0);
5202 }
5203 }
5204 }
5205
5206 static void bnxt_free_port_stats(struct bnxt *bp)
5207 {
5208 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5209 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5210
5211 bnxt_free_stats_mem(bp, &bp->port_stats);
5212 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5213 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5214 }
5215
5216 static void bnxt_free_ring_stats(struct bnxt *bp)
5217 {
5218 int i;
5219
5220 if (!bp->bnapi)
5221 return;
5222
5223 for (i = 0; i < bp->cp_nr_rings; i++) {
5224 struct bnxt_napi *bnapi = bp->bnapi[i];
5225 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5226
5227 bnxt_free_stats_mem(bp, &cpr->stats);
5228
5229 kfree(cpr->sw_stats);
5230 cpr->sw_stats = NULL;
5231 }
5232 }
5233
5234 static int bnxt_alloc_stats(struct bnxt *bp)
5235 {
5236 u32 size, i;
5237 int rc;
5238
5239 size = bp->hw_ring_stats_size;
5240
5241 for (i = 0; i < bp->cp_nr_rings; i++) {
5242 struct bnxt_napi *bnapi = bp->bnapi[i];
5243 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5244
5245 cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5246 if (!cpr->sw_stats)
5247 return -ENOMEM;
5248
5249 cpr->stats.len = size;
5250 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5251 if (rc)
5252 return rc;
5253
5254 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5255 }
5256
5257 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5258 return 0;
5259
5260 if (bp->port_stats.hw_stats)
5261 goto alloc_ext_stats;
5262
5263 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5264 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5265 if (rc)
5266 return rc;
5267
5268 bp->flags |= BNXT_FLAG_PORT_STATS;
5269
5270 alloc_ext_stats:
5271 /* Display extended statistics only if FW supports it */
5272 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5273 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5274 return 0;
5275
5276 if (bp->rx_port_stats_ext.hw_stats)
5277 goto alloc_tx_ext_stats;
5278
5279 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5280 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5281 /* Extended stats are optional */
5282 if (rc)
5283 return 0;
5284
5285 alloc_tx_ext_stats:
5286 if (bp->tx_port_stats_ext.hw_stats)
5287 return 0;
5288
5289 if (bp->hwrm_spec_code >= 0x10902 ||
5290 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5291 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5292 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5293 /* Extended stats are optional */
5294 if (rc)
5295 return 0;
5296 }
5297 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5298 return 0;
5299 }
5300
5301 static void bnxt_clear_ring_indices(struct bnxt *bp)
5302 {
5303 int i, j;
5304
5305 if (!bp->bnapi)
5306 return;
5307
5308 for (i = 0; i < bp->cp_nr_rings; i++) {
5309 struct bnxt_napi *bnapi = bp->bnapi[i];
5310 struct bnxt_cp_ring_info *cpr;
5311 struct bnxt_rx_ring_info *rxr;
5312 struct bnxt_tx_ring_info *txr;
5313
5314 if (!bnapi)
5315 continue;
5316
5317 cpr = &bnapi->cp_ring;
5318 cpr->cp_raw_cons = 0;
5319
5320 bnxt_for_each_napi_tx(j, bnapi, txr) {
5321 txr->tx_prod = 0;
5322 txr->tx_cons = 0;
5323 txr->tx_hw_cons = 0;
5324 }
5325
5326 rxr = bnapi->rx_ring;
5327 if (rxr) {
5328 rxr->rx_prod = 0;
5329 rxr->rx_agg_prod = 0;
5330 rxr->rx_sw_agg_prod = 0;
5331 rxr->rx_next_cons = 0;
5332 }
5333 bnapi->events = 0;
5334 }
5335 }
5336
5337 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5338 {
5339 u8 type = fltr->type, flags = fltr->flags;
5340
5341 INIT_LIST_HEAD(&fltr->list);
5342 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5343 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5344 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5345 }
5346
5347 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5348 {
5349 if (!list_empty(&fltr->list))
5350 list_del_init(&fltr->list);
5351 }
5352
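/* Drop entries from the user filter list.  When @all is false, user-created
 * L2 filters are kept and only the remaining (ntuple) entries are removed.
 */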
5353 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5354 {
5355 struct bnxt_filter_base *usr_fltr, *tmp;
5356
5357 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5358 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5359 continue;
5360 bnxt_del_one_usr_fltr(bp, usr_fltr);
5361 }
5362 }
5363
5364 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5365 {
5366 hlist_del(&fltr->hash);
5367 bnxt_del_one_usr_fltr(bp, fltr);
5368 if (fltr->flags) {
5369 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5370 bp->ntp_fltr_count--;
5371 }
5372 kfree(fltr);
5373 }
5374
5375 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5376 {
5377 int i;
5378
5379 netdev_assert_locked_or_invisible(bp->dev);
5380
5381 	/* We hold the netdev instance lock and all our NAPIs have been
5382 	 * disabled, so it is safe to delete the hash table.
5383 	 */
5384 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5385 struct hlist_head *head;
5386 struct hlist_node *tmp;
5387 struct bnxt_ntuple_filter *fltr;
5388
5389 head = &bp->ntp_fltr_hash_tbl[i];
5390 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5391 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5392 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5393 !list_empty(&fltr->base.list)))
5394 continue;
5395 bnxt_del_fltr(bp, &fltr->base);
5396 }
5397 }
5398 if (!all)
5399 return;
5400
5401 bitmap_free(bp->ntp_fltr_bmap);
5402 bp->ntp_fltr_bmap = NULL;
5403 bp->ntp_fltr_count = 0;
5404 }
5405
5406 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5407 {
5408 int i, rc = 0;
5409
5410 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5411 return 0;
5412
5413 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5414 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5415
5416 bp->ntp_fltr_count = 0;
5417 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5418
5419 if (!bp->ntp_fltr_bmap)
5420 rc = -ENOMEM;
5421
5422 return rc;
5423 }
5424
5425 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5426 {
5427 int i;
5428
5429 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5430 struct hlist_head *head;
5431 struct hlist_node *tmp;
5432 struct bnxt_l2_filter *fltr;
5433
5434 head = &bp->l2_fltr_hash_tbl[i];
5435 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5436 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5437 !list_empty(&fltr->base.list)))
5438 continue;
5439 bnxt_del_fltr(bp, &fltr->base);
5440 }
5441 }
5442 }
5443
5444 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5445 {
5446 int i;
5447
5448 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5449 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5450 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5451 }
5452
5453 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5454 {
5455 bnxt_free_vnic_attributes(bp);
5456 bnxt_free_tx_rings(bp);
5457 bnxt_free_rx_rings(bp);
5458 bnxt_free_cp_rings(bp);
5459 bnxt_free_all_cp_arrays(bp);
5460 bnxt_free_ntp_fltrs(bp, false);
5461 bnxt_free_l2_filters(bp, false);
5462 if (irq_re_init) {
5463 bnxt_free_ring_stats(bp);
5464 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5465 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5466 bnxt_free_port_stats(bp);
5467 bnxt_free_ring_grps(bp);
5468 bnxt_free_vnics(bp);
5469 kfree(bp->tx_ring_map);
5470 bp->tx_ring_map = NULL;
5471 kfree(bp->tx_ring);
5472 bp->tx_ring = NULL;
5473 kfree(bp->rx_ring);
5474 bp->rx_ring = NULL;
5475 kfree(bp->bnapi);
5476 bp->bnapi = NULL;
5477 } else {
5478 bnxt_clear_ring_indices(bp);
5479 }
5480 }
5481
5482 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5483 {
5484 int i, j, rc, size, arr_size;
5485 void *bnapi;
5486
5487 if (irq_re_init) {
5488 /* Allocate bnapi mem pointer array and mem block for
5489 * all queues
5490 */
5491 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5492 bp->cp_nr_rings);
5493 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5494 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5495 if (!bnapi)
5496 return -ENOMEM;
5497
5498 bp->bnapi = bnapi;
5499 bnapi += arr_size;
5500 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5501 bp->bnapi[i] = bnapi;
5502 bp->bnapi[i]->index = i;
5503 bp->bnapi[i]->bp = bp;
5504 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5505 struct bnxt_cp_ring_info *cpr =
5506 &bp->bnapi[i]->cp_ring;
5507
5508 cpr->cp_ring_struct.ring_mem.flags =
5509 BNXT_RMEM_RING_PTE_FLAG;
5510 }
5511 }
5512
5513 bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
5514 bp->rx_nr_rings);
5515 if (!bp->rx_ring)
5516 return -ENOMEM;
5517
5518 for (i = 0; i < bp->rx_nr_rings; i++) {
5519 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5520
5521 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5522 rxr->rx_ring_struct.ring_mem.flags =
5523 BNXT_RMEM_RING_PTE_FLAG;
5524 rxr->rx_agg_ring_struct.ring_mem.flags =
5525 BNXT_RMEM_RING_PTE_FLAG;
5526 } else {
5527 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5528 }
5529 rxr->bnapi = bp->bnapi[i];
5530 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5531 }
5532
5533 bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
5534 bp->tx_nr_rings);
5535 if (!bp->tx_ring)
5536 return -ENOMEM;
5537
5538 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5539 GFP_KERNEL);
5540
5541 if (!bp->tx_ring_map)
5542 return -ENOMEM;
5543
5544 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5545 j = 0;
5546 else
5547 j = bp->rx_nr_rings;
5548
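		/* Map each TX ring to a NAPI instance: XDP TX rings each take
		 * the next NAPI starting at index j, while regular TX rings
		 * are placed relative to j according to their traffic class.
		 */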
5549 for (i = 0; i < bp->tx_nr_rings; i++) {
5550 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5551 struct bnxt_napi *bnapi2;
5552
5553 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5554 txr->tx_ring_struct.ring_mem.flags =
5555 BNXT_RMEM_RING_PTE_FLAG;
5556 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5557 if (i >= bp->tx_nr_rings_xdp) {
5558 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5559
5560 bnapi2 = bp->bnapi[k];
5561 txr->txq_index = i - bp->tx_nr_rings_xdp;
5562 txr->tx_napi_idx =
5563 BNXT_RING_TO_TC(bp, txr->txq_index);
5564 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5565 bnapi2->tx_int = bnxt_tx_int;
5566 } else {
5567 bnapi2 = bp->bnapi[j];
5568 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5569 bnapi2->tx_ring[0] = txr;
5570 bnapi2->tx_int = bnxt_tx_int_xdp;
5571 j++;
5572 }
5573 txr->bnapi = bnapi2;
5574 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5575 txr->tx_cpr = &bnapi2->cp_ring;
5576 }
5577
5578 rc = bnxt_alloc_stats(bp);
5579 if (rc)
5580 goto alloc_mem_err;
5581 bnxt_init_stats(bp);
5582
5583 rc = bnxt_alloc_ntp_fltrs(bp);
5584 if (rc)
5585 goto alloc_mem_err;
5586
5587 rc = bnxt_alloc_vnics(bp);
5588 if (rc)
5589 goto alloc_mem_err;
5590 }
5591
5592 rc = bnxt_alloc_all_cp_arrays(bp);
5593 if (rc)
5594 goto alloc_mem_err;
5595
5596 bnxt_init_ring_struct(bp);
5597
5598 rc = bnxt_alloc_rx_rings(bp);
5599 if (rc)
5600 goto alloc_mem_err;
5601
5602 rc = bnxt_alloc_tx_rings(bp);
5603 if (rc)
5604 goto alloc_mem_err;
5605
5606 rc = bnxt_alloc_cp_rings(bp);
5607 if (rc)
5608 goto alloc_mem_err;
5609
5610 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5611 BNXT_VNIC_MCAST_FLAG |
5612 BNXT_VNIC_UCAST_FLAG;
5613 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5614 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5615 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5616
5617 rc = bnxt_alloc_vnic_attributes(bp);
5618 if (rc)
5619 goto alloc_mem_err;
5620 return 0;
5621
5622 alloc_mem_err:
5623 bnxt_free_mem(bp, true);
5624 return rc;
5625 }
5626
5627 static void bnxt_disable_int(struct bnxt *bp)
5628 {
5629 int i;
5630
5631 if (!bp->bnapi)
5632 return;
5633
5634 for (i = 0; i < bp->cp_nr_rings; i++) {
5635 struct bnxt_napi *bnapi = bp->bnapi[i];
5636 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5637 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5638
5639 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5640 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5641 }
5642 }
5643
5644 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5645 {
5646 struct bnxt_napi *bnapi = bp->bnapi[n];
5647 struct bnxt_cp_ring_info *cpr;
5648
5649 cpr = &bnapi->cp_ring;
5650 return cpr->cp_ring_struct.map_idx;
5651 }
5652
5653 static void bnxt_disable_int_sync(struct bnxt *bp)
5654 {
5655 int i;
5656
5657 if (!bp->irq_tbl)
5658 return;
5659
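	/* Flag that interrupts are being disabled, quiesce the NQ doorbells,
	 * and wait for any in-flight handlers on every vector to finish.
	 */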
5660 atomic_inc(&bp->intr_sem);
5661
5662 bnxt_disable_int(bp);
5663 for (i = 0; i < bp->cp_nr_rings; i++) {
5664 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5665
5666 synchronize_irq(bp->irq_tbl[map_idx].vector);
5667 }
5668 }
5669
5670 static void bnxt_enable_int(struct bnxt *bp)
5671 {
5672 int i;
5673
5674 atomic_set(&bp->intr_sem, 0);
5675 for (i = 0; i < bp->cp_nr_rings; i++) {
5676 struct bnxt_napi *bnapi = bp->bnapi[i];
5677 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5678
5679 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5680 }
5681 }
5682
5683 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5684 bool async_only)
5685 {
5686 DECLARE_BITMAP(async_events_bmap, 256);
5687 u32 *events = (u32 *)async_events_bmap;
5688 struct hwrm_func_drv_rgtr_output *resp;
5689 struct hwrm_func_drv_rgtr_input *req;
5690 u32 flags;
5691 int rc, i;
5692
5693 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5694 if (rc)
5695 return rc;
5696
5697 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5698 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5699 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5700
5701 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5702 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5703 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5704 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5705 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5706 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5707 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5708 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5709 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5710 req->flags = cpu_to_le32(flags);
5711 req->ver_maj_8b = DRV_VER_MAJ;
5712 req->ver_min_8b = DRV_VER_MIN;
5713 req->ver_upd_8b = DRV_VER_UPD;
5714 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5715 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5716 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5717
5718 if (BNXT_PF(bp)) {
5719 u32 data[8];
5720 int i;
5721
5722 memset(data, 0, sizeof(data));
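		/* Build a 256-bit bitmap (8 x u32) of HWRM command ids that
		 * firmware should forward from VFs to the PF driver.
		 */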
5723 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5724 u16 cmd = bnxt_vf_req_snif[i];
5725 unsigned int bit, idx;
5726
5727 if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5728 cmd == HWRM_PORT_PHY_QCFG)
5729 continue;
5730
5731 idx = cmd / 32;
5732 bit = cmd % 32;
5733 data[idx] |= 1 << bit;
5734 }
5735
5736 for (i = 0; i < 8; i++)
5737 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5738
5739 req->enables |=
5740 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5741 }
5742
5743 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5744 req->flags |= cpu_to_le32(
5745 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5746
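	/* Build the 256-bit bitmap of async completion event ids that
	 * firmware should forward to the driver.
	 */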
5747 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5748 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5749 u16 event_id = bnxt_async_events_arr[i];
5750
5751 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5752 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5753 continue;
5754 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5755 !bp->ptp_cfg)
5756 continue;
5757 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5758 }
5759 if (bmap && bmap_size) {
5760 for (i = 0; i < bmap_size; i++) {
5761 if (test_bit(i, bmap))
5762 __set_bit(i, async_events_bmap);
5763 }
5764 }
5765 for (i = 0; i < 8; i++)
5766 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5767
5768 if (async_only)
5769 req->enables =
5770 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5771
5772 resp = hwrm_req_hold(bp, req);
5773 rc = hwrm_req_send(bp, req);
5774 if (!rc) {
5775 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5776 if (resp->flags &
5777 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5778 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5779 }
5780 hwrm_req_drop(bp, req);
5781 return rc;
5782 }
5783
5784 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5785 {
5786 struct hwrm_func_drv_unrgtr_input *req;
5787 int rc;
5788
5789 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5790 return 0;
5791
5792 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5793 if (rc)
5794 return rc;
5795 return hwrm_req_send(bp, req);
5796 }
5797
5798 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5799
5800 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5801 {
5802 struct hwrm_tunnel_dst_port_free_input *req;
5803 int rc;
5804
5805 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5806 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5807 return 0;
5808 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5809 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5810 return 0;
5811
5812 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5813 if (rc)
5814 return rc;
5815
5816 req->tunnel_type = tunnel_type;
5817
5818 switch (tunnel_type) {
5819 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5820 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5821 bp->vxlan_port = 0;
5822 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5823 break;
5824 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5825 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5826 bp->nge_port = 0;
5827 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5828 break;
5829 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5830 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5831 bp->vxlan_gpe_port = 0;
5832 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5833 break;
5834 default:
5835 break;
5836 }
5837
5838 rc = hwrm_req_send(bp, req);
5839 if (rc)
5840 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5841 rc);
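	/* Reconfigure TPA so the tunnel TPA enable bitmap no longer includes
	 * the freed tunnel port (see bnxt_hwrm_vnic_update_tunl_tpa()).
	 */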
5842 if (bp->flags & BNXT_FLAG_TPA)
5843 bnxt_set_tpa(bp, true);
5844 return rc;
5845 }
5846
5847 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5848 u8 tunnel_type)
5849 {
5850 struct hwrm_tunnel_dst_port_alloc_output *resp;
5851 struct hwrm_tunnel_dst_port_alloc_input *req;
5852 int rc;
5853
5854 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5855 if (rc)
5856 return rc;
5857
5858 req->tunnel_type = tunnel_type;
5859 req->tunnel_dst_port_val = port;
5860
5861 resp = hwrm_req_hold(bp, req);
5862 rc = hwrm_req_send(bp, req);
5863 if (rc) {
5864 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5865 rc);
5866 goto err_out;
5867 }
5868
5869 switch (tunnel_type) {
5870 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5871 bp->vxlan_port = port;
5872 bp->vxlan_fw_dst_port_id =
5873 le16_to_cpu(resp->tunnel_dst_port_id);
5874 break;
5875 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5876 bp->nge_port = port;
5877 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5878 break;
5879 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5880 bp->vxlan_gpe_port = port;
5881 bp->vxlan_gpe_fw_dst_port_id =
5882 le16_to_cpu(resp->tunnel_dst_port_id);
5883 break;
5884 default:
5885 break;
5886 }
5887 if (bp->flags & BNXT_FLAG_TPA)
5888 bnxt_set_tpa(bp, true);
5889
5890 err_out:
5891 hwrm_req_drop(bp, req);
5892 return rc;
5893 }
5894
5895 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5896 {
5897 struct hwrm_cfa_l2_set_rx_mask_input *req;
5898 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5899 int rc;
5900
5901 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5902 if (rc)
5903 return rc;
5904
5905 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5906 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5907 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5908 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5909 }
5910 req->mask = cpu_to_le32(vnic->rx_mask);
5911 return hwrm_req_send_silent(bp, req);
5912 }
5913
5914 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5915 {
5916 if (!atomic_dec_and_test(&fltr->refcnt))
5917 return;
5918 spin_lock_bh(&bp->ntp_fltr_lock);
5919 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5920 spin_unlock_bh(&bp->ntp_fltr_lock);
5921 return;
5922 }
5923 hlist_del_rcu(&fltr->base.hash);
5924 bnxt_del_one_usr_fltr(bp, &fltr->base);
5925 if (fltr->base.flags) {
5926 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5927 bp->ntp_fltr_count--;
5928 }
5929 spin_unlock_bh(&bp->ntp_fltr_lock);
5930 kfree_rcu(fltr, base.rcu);
5931 }
5932
5933 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5934 struct bnxt_l2_key *key,
5935 u32 idx)
5936 {
5937 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5938 struct bnxt_l2_filter *fltr;
5939
5940 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5941 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5942
5943 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5944 l2_key->vlan == key->vlan)
5945 return fltr;
5946 }
5947 return NULL;
5948 }
5949
5950 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5951 struct bnxt_l2_key *key,
5952 u32 idx)
5953 {
5954 struct bnxt_l2_filter *fltr = NULL;
5955
5956 rcu_read_lock();
5957 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5958 if (fltr)
5959 atomic_inc(&fltr->refcnt);
5960 rcu_read_unlock();
5961 return fltr;
5962 }
5963
5964 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5965 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5966 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5967 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5968 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5969
5970 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5971 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5972 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5973 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5974 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5975
5976 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5977 {
5978 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5979 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5980 return sizeof(fkeys->addrs.v4addrs) +
5981 sizeof(fkeys->ports);
5982
5983 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5984 return sizeof(fkeys->addrs.v4addrs);
5985 }
5986
5987 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5988 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5989 return sizeof(fkeys->addrs.v6addrs) +
5990 sizeof(fkeys->ports);
5991
5992 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5993 return sizeof(fkeys->addrs.v6addrs);
5994 }
5995
5996 return 0;
5997 }
5998
5999 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
6000 const unsigned char *key)
6001 {
6002 u64 prefix = bp->toeplitz_prefix, hash = 0;
6003 struct bnxt_ipv4_tuple tuple4;
6004 struct bnxt_ipv6_tuple tuple6;
6005 int i, j, len = 0;
6006 u8 *four_tuple;
6007
6008 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6009 if (!len)
6010 return 0;
6011
6012 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6013 tuple4.v4addrs = fkeys->addrs.v4addrs;
6014 tuple4.ports = fkeys->ports;
6015 four_tuple = (unsigned char *)&tuple4;
6016 } else {
6017 tuple6.v6addrs = fkeys->addrs.v6addrs;
6018 tuple6.ports = fkeys->ports;
6019 four_tuple = (unsigned char *)&tuple6;
6020 }
6021
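	/* Standard Toeplitz: for every input bit that is set, XOR the current
	 * key window ('prefix') into the hash.  The window slides left one
	 * bit per input bit and is refilled byte-by-byte from the key
	 * starting at offset 8.
	 */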
6022 for (i = 0, j = 8; i < len; i++, j++) {
6023 u8 byte = four_tuple[i];
6024 int bit;
6025
6026 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6027 if (byte & 0x80)
6028 hash ^= prefix;
6029 }
6030 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6031 }
6032
6033 /* The valid part of the hash is in the upper 32 bits. */
6034 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6035 }
6036
6037 #ifdef CONFIG_RFS_ACCEL
6038 static struct bnxt_l2_filter *
6039 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6040 {
6041 struct bnxt_l2_filter *fltr;
6042 u32 idx;
6043
6044 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6045 BNXT_L2_FLTR_HASH_MASK;
6046 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6047 return fltr;
6048 }
6049 #endif
6050
6051 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6052 struct bnxt_l2_key *key, u32 idx)
6053 {
6054 struct hlist_head *head;
6055
6056 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6057 fltr->l2_key.vlan = key->vlan;
6058 fltr->base.type = BNXT_FLTR_TYPE_L2;
6059 if (fltr->base.flags) {
6060 int bit_id;
6061
6062 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6063 bp->max_fltr, 0);
6064 if (bit_id < 0)
6065 return -ENOMEM;
6066 fltr->base.sw_id = (u16)bit_id;
6067 bp->ntp_fltr_count++;
6068 }
6069 head = &bp->l2_fltr_hash_tbl[idx];
6070 hlist_add_head_rcu(&fltr->base.hash, head);
6071 bnxt_insert_usr_fltr(bp, &fltr->base);
6072 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6073 atomic_set(&fltr->refcnt, 1);
6074 return 0;
6075 }
6076
6077 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6078 struct bnxt_l2_key *key,
6079 gfp_t gfp)
6080 {
6081 struct bnxt_l2_filter *fltr;
6082 u32 idx;
6083 int rc;
6084
6085 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6086 BNXT_L2_FLTR_HASH_MASK;
6087 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6088 if (fltr)
6089 return fltr;
6090
6091 fltr = kzalloc_obj(*fltr, gfp);
6092 if (!fltr)
6093 return ERR_PTR(-ENOMEM);
6094 spin_lock_bh(&bp->ntp_fltr_lock);
6095 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6096 spin_unlock_bh(&bp->ntp_fltr_lock);
6097 if (rc) {
6098 bnxt_del_l2_filter(bp, fltr);
6099 fltr = ERR_PTR(rc);
6100 }
6101 return fltr;
6102 }
6103
6104 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6105 struct bnxt_l2_key *key,
6106 u16 flags)
6107 {
6108 struct bnxt_l2_filter *fltr;
6109 u32 idx;
6110 int rc;
6111
6112 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6113 BNXT_L2_FLTR_HASH_MASK;
6114 spin_lock_bh(&bp->ntp_fltr_lock);
6115 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6116 if (fltr) {
6117 fltr = ERR_PTR(-EEXIST);
6118 goto l2_filter_exit;
6119 }
6120 fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
6121 if (!fltr) {
6122 fltr = ERR_PTR(-ENOMEM);
6123 goto l2_filter_exit;
6124 }
6125 fltr->base.flags = flags;
6126 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6127 if (rc) {
6128 spin_unlock_bh(&bp->ntp_fltr_lock);
6129 bnxt_del_l2_filter(bp, fltr);
6130 return ERR_PTR(rc);
6131 }
6132
6133 l2_filter_exit:
6134 spin_unlock_bh(&bp->ntp_fltr_lock);
6135 return fltr;
6136 }
6137
6138 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6139 {
6140 #ifdef CONFIG_BNXT_SRIOV
6141 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6142
6143 return vf->fw_fid;
6144 #else
6145 return INVALID_HW_RING_ID;
6146 #endif
6147 }
6148
6149 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6150 {
6151 struct hwrm_cfa_l2_filter_free_input *req;
6152 u16 target_id = 0xffff;
6153 int rc;
6154
6155 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6156 struct bnxt_pf_info *pf = &bp->pf;
6157
6158 if (fltr->base.vf_idx >= pf->active_vfs)
6159 return -EINVAL;
6160
6161 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6162 if (target_id == INVALID_HW_RING_ID)
6163 return -EINVAL;
6164 }
6165
6166 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6167 if (rc)
6168 return rc;
6169
6170 req->target_id = cpu_to_le16(target_id);
6171 req->l2_filter_id = fltr->base.filter_id;
6172 return hwrm_req_send(bp, req);
6173 }
6174
6175 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6176 {
6177 struct hwrm_cfa_l2_filter_alloc_output *resp;
6178 struct hwrm_cfa_l2_filter_alloc_input *req;
6179 u16 target_id = 0xffff;
6180 int rc;
6181
6182 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6183 struct bnxt_pf_info *pf = &bp->pf;
6184
6185 if (fltr->base.vf_idx >= pf->active_vfs)
6186 return -EINVAL;
6187
6188 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6189 }
6190 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6191 if (rc)
6192 return rc;
6193
6194 req->target_id = cpu_to_le16(target_id);
6195 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6196
6197 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6198 req->flags |=
6199 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6200 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6201 req->enables =
6202 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6203 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6204 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6205 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6206 eth_broadcast_addr(req->l2_addr_mask);
6207
6208 if (fltr->l2_key.vlan) {
6209 req->enables |=
6210 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6211 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6212 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6213 req->num_vlans = 1;
6214 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6215 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6216 }
6217
6218 resp = hwrm_req_hold(bp, req);
6219 rc = hwrm_req_send(bp, req);
6220 if (!rc) {
6221 fltr->base.filter_id = resp->l2_filter_id;
6222 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6223 }
6224 hwrm_req_drop(bp, req);
6225 return rc;
6226 }
6227
6228 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6229 struct bnxt_ntuple_filter *fltr)
6230 {
6231 struct hwrm_cfa_ntuple_filter_free_input *req;
6232 int rc;
6233
6234 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6235 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
6236 return 0;
6237
6238 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6239 if (rc)
6240 return rc;
6241
6242 req->ntuple_filter_id = fltr->base.filter_id;
6243 return hwrm_req_send(bp, req);
6244 }
6245
6246 #define BNXT_NTP_FLTR_FLAGS \
6247 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6248 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6249 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6250 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6251 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6252 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6253 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6254 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6255 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6256 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6257 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6258 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6259 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6260
6261 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6262 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6263
6264 void bnxt_fill_ipv6_mask(__be32 mask[4])
6265 {
6266 int i;
6267
6268 for (i = 0; i < 4; i++)
6269 mask[i] = cpu_to_be32(~0);
6270 }
6271
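/* Point an ntuple filter at its RX destination: the VNIC of a user RSS
 * context, the dedicated ntuple VNIC plus an RFS ring table index, or the
 * RX ring itself on older firmware.
 */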
6272 static void
6273 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6274 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6275 struct bnxt_ntuple_filter *fltr)
6276 {
6277 u16 rxq = fltr->base.rxq;
6278
6279 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6280 struct ethtool_rxfh_context *ctx;
6281 struct bnxt_rss_ctx *rss_ctx;
6282 struct bnxt_vnic_info *vnic;
6283
6284 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6285 fltr->base.fw_vnic_id);
6286 if (ctx) {
6287 rss_ctx = ethtool_rxfh_context_priv(ctx);
6288 vnic = &rss_ctx->vnic;
6289
6290 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6291 }
6292 return;
6293 }
6294 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6295 struct bnxt_vnic_info *vnic;
6296 u32 enables;
6297
6298 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6299 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6300 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6301 req->enables |= cpu_to_le32(enables);
6302 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6303 } else {
6304 u32 flags;
6305
6306 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6307 req->flags |= cpu_to_le32(flags);
6308 req->dst_id = cpu_to_le16(rxq);
6309 }
6310 }
6311
6312 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6313 struct bnxt_ntuple_filter *fltr)
6314 {
6315 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6316 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6317 struct bnxt_flow_masks *masks = &fltr->fmasks;
6318 struct flow_keys *keys = &fltr->fkeys;
6319 struct bnxt_l2_filter *l2_fltr;
6320 struct bnxt_vnic_info *vnic;
6321 int rc;
6322
6323 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6324 if (rc)
6325 return rc;
6326
6327 l2_fltr = fltr->l2_fltr;
6328 req->l2_filter_id = l2_fltr->base.filter_id;
6329
6330 if (fltr->base.flags & BNXT_ACT_DROP) {
6331 req->flags =
6332 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6333 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6334 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6335 } else {
6336 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6337 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6338 }
6339 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6340
6341 req->ethertype = htons(ETH_P_IP);
6342 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6343 req->ip_protocol = keys->basic.ip_proto;
6344
6345 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6346 req->ethertype = htons(ETH_P_IPV6);
6347 req->ip_addr_type =
6348 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6349 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6350 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6351 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6352 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6353 } else {
6354 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6355 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6356 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6357 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6358 }
6359 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6360 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6361 req->tunnel_type =
6362 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6363 }
6364
6365 req->src_port = keys->ports.src;
6366 req->src_port_mask = masks->ports.src;
6367 req->dst_port = keys->ports.dst;
6368 req->dst_port_mask = masks->ports.dst;
6369
6370 resp = hwrm_req_hold(bp, req);
6371 rc = hwrm_req_send(bp, req);
6372 if (!rc)
6373 fltr->base.filter_id = resp->ntuple_filter_id;
6374 hwrm_req_drop(bp, req);
6375 return rc;
6376 }
6377
6378 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6379 const u8 *mac_addr)
6380 {
6381 struct bnxt_l2_filter *fltr;
6382 struct bnxt_l2_key key;
6383 int rc;
6384
6385 ether_addr_copy(key.dst_mac_addr, mac_addr);
6386 key.vlan = 0;
6387 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6388 if (IS_ERR(fltr))
6389 return PTR_ERR(fltr);
6390
6391 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6392 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6393 if (rc)
6394 bnxt_del_l2_filter(bp, fltr);
6395 else
6396 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6397 return rc;
6398 }
6399
6400 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6401 {
6402 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6403
6404 /* Any associated ntuple filters will also be cleared by firmware. */
6405 for (i = 0; i < num_of_vnics; i++) {
6406 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6407
6408 for (j = 0; j < vnic->uc_filter_count; j++) {
6409 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6410
6411 bnxt_hwrm_l2_filter_free(bp, fltr);
6412 bnxt_del_l2_filter(bp, fltr);
6413 }
6414 vnic->uc_filter_count = 0;
6415 }
6416 }
6417
6418 #define BNXT_DFLT_TUNL_TPA_BMAP \
6419 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6420 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6421 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6422
6423 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6424 struct hwrm_vnic_tpa_cfg_input *req)
6425 {
6426 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6427
6428 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6429 return;
6430
6431 if (bp->vxlan_port)
6432 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6433 if (bp->vxlan_gpe_port)
6434 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6435 if (bp->nge_port)
6436 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6437
6438 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6439 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6440 }
6441
6442 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6443 u32 tpa_flags)
6444 {
6445 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6446 struct hwrm_vnic_tpa_cfg_input *req;
6447 int rc;
6448
6449 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6450 return 0;
6451
6452 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6453 if (rc)
6454 return rc;
6455
6456 if (tpa_flags) {
6457 u16 mss = bp->dev->mtu - 40;
6458 u32 nsegs, n, segs = 0, flags;
6459
6460 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6461 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6462 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6463 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6464 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6465 if (tpa_flags & BNXT_FLAG_GRO)
6466 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6467
6468 req->flags = cpu_to_le32(flags);
6469
6470 req->enables =
6471 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6472 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6473 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6474
6475 		/* The number of segs is in log2 units, and the first packet
6476 		 * is not included in this count.
6477 		 */
6478 if (mss <= BNXT_RX_PAGE_SIZE) {
6479 n = BNXT_RX_PAGE_SIZE / mss;
6480 nsegs = (MAX_SKB_FRAGS - 1) * n;
6481 } else {
6482 n = mss / BNXT_RX_PAGE_SIZE;
6483 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6484 n++;
6485 nsegs = (MAX_SKB_FRAGS - n) / n;
6486 }
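		/* For example, with a 1500-byte MTU (mss = 1460) and 4K RX
		 * pages, n = 2 and nsegs = (MAX_SKB_FRAGS - 1) * 2.
		 */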
6487
6488 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6489 segs = MAX_TPA_SEGS_P5;
6490 max_aggs = bp->max_tpa;
6491 } else {
6492 segs = ilog2(nsegs);
6493 }
6494 req->max_agg_segs = cpu_to_le16(segs);
6495 req->max_aggs = cpu_to_le16(max_aggs);
6496
6497 req->min_agg_len = cpu_to_le32(512);
6498 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6499 }
6500 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6501
6502 return hwrm_req_send(bp, req);
6503 }
6504
6505 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6506 {
6507 struct bnxt_ring_grp_info *grp_info;
6508
6509 grp_info = &bp->grp_info[ring->grp_idx];
6510 return grp_info->cp_fw_ring_id;
6511 }
6512
6513 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6514 {
6515 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6516 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6517 else
6518 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6519 }
6520
6521 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6522 {
6523 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6524 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6525 else
6526 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6527 }
6528
6529 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6530 {
6531 int entries;
6532
6533 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6534 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6535 else
6536 entries = HW_HASH_INDEX_SIZE;
6537
6538 bp->rss_indir_tbl_entries = entries;
6539 bp->rss_indir_tbl =
6540 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6541 if (!bp->rss_indir_tbl)
6542 return -ENOMEM;
6543
6544 return 0;
6545 }
6546
6547 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6548 struct ethtool_rxfh_context *rss_ctx)
6549 {
6550 u16 max_rings, max_entries, pad, i;
6551 u32 *rss_indir_tbl;
6552
6553 if (!bp->rx_nr_rings)
6554 return;
6555
6556 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6557 max_rings = bp->rx_nr_rings - 1;
6558 else
6559 max_rings = bp->rx_nr_rings;
6560
6561 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6562 if (rss_ctx)
6563 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6564 else
6565 rss_indir_tbl = &bp->rss_indir_tbl[0];
6566
6567 for (i = 0; i < max_entries; i++)
6568 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6569
6570 pad = bp->rss_indir_tbl_entries - max_entries;
6571 if (pad)
6572 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6573 }
6574
6575 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6576 {
6577 u32 i, tbl_size, max_ring = 0;
6578
6579 if (!bp->rss_indir_tbl)
6580 return 0;
6581
6582 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6583 for (i = 0; i < tbl_size; i++)
6584 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6585 return max_ring;
6586 }
6587
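/* Return the number of RSS contexts required for a given RX ring count.
 * On P5+ chips each context covers BNXT_RSS_TABLE_ENTRIES_P5 rings unless
 * the chip supports one large context; older chips use one (two on Nitro A0).
 */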
6588 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6589 {
6590 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6591 if (!rx_rings)
6592 return 0;
6593 if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6594 return BNXT_RSS_TABLE_MAX_TBL_P5;
6595
6596 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6597 BNXT_RSS_TABLE_ENTRIES_P5);
6598 }
6599 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6600 return 2;
6601 return 1;
6602 }
6603
6604 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6605 {
6606 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6607 u16 i, j;
6608
6609 /* Fill the RSS indirection table with ring group ids */
6610 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6611 if (!no_rss)
6612 j = bp->rss_indir_tbl[i];
6613 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6614 }
6615 }
6616
6617 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6618 struct bnxt_vnic_info *vnic)
6619 {
6620 __le16 *ring_tbl = vnic->rss_table;
6621 struct bnxt_rx_ring_info *rxr;
6622 u16 tbl_size, i;
6623
6624 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6625
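	/* Each entry in the P5+ indirection table is a pair: the RX ring's
	 * FW ring id followed by its completion ring's FW ring id.
	 */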
6626 for (i = 0; i < tbl_size; i++) {
6627 u16 ring_id, j;
6628
6629 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6630 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6631 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6632 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6633 else
6634 j = bp->rss_indir_tbl[i];
6635 rxr = &bp->rx_ring[j];
6636
6637 ring_id = rxr->rx_ring_struct.fw_ring_id;
6638 *ring_tbl++ = cpu_to_le16(ring_id);
6639 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6640 *ring_tbl++ = cpu_to_le16(ring_id);
6641 }
6642 }
6643
6644 static void
6645 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6646 struct bnxt_vnic_info *vnic)
6647 {
6648 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6649 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6650 if (bp->flags & BNXT_FLAG_CHIP_P7)
6651 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6652 } else {
6653 bnxt_fill_hw_rss_tbl(bp, vnic);
6654 }
6655
6656 if (bp->rss_hash_delta) {
6657 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6658 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6659 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6660 else
6661 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6662 } else {
6663 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6664 }
6665 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6666 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6667 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6668 }
6669
6670 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6671 bool set_rss)
6672 {
6673 struct hwrm_vnic_rss_cfg_input *req;
6674 int rc;
6675
6676 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6677 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6678 return 0;
6679
6680 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6681 if (rc)
6682 return rc;
6683
6684 if (set_rss)
6685 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6686 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6687 return hwrm_req_send(bp, req);
6688 }
6689
6690 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6691 struct bnxt_vnic_info *vnic, bool set_rss)
6692 {
6693 struct hwrm_vnic_rss_cfg_input *req;
6694 dma_addr_t ring_tbl_map;
6695 u32 i, nr_ctxs;
6696 int rc;
6697
6698 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6699 if (rc)
6700 return rc;
6701
6702 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6703 if (!set_rss)
6704 return hwrm_req_send(bp, req);
6705
6706 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6707 ring_tbl_map = vnic->rss_table_dma_addr;
6708 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6709
6710 hwrm_req_hold(bp, req);
6711 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6712 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6713 req->ring_table_pair_index = i;
6714 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6715 rc = hwrm_req_send(bp, req);
6716 if (rc)
6717 goto exit;
6718 }
6719
6720 exit:
6721 hwrm_req_drop(bp, req);
6722 return rc;
6723 }
6724
6725 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6726 {
6727 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6728 struct hwrm_vnic_rss_qcfg_output *resp;
6729 struct hwrm_vnic_rss_qcfg_input *req;
6730
6731 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6732 return;
6733
6734 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6735 	/* all contexts are configured with the same hash_type; context 0 always exists */
6736 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6737 resp = hwrm_req_hold(bp, req);
6738 if (!hwrm_req_send(bp, req)) {
6739 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6740 bp->rss_hash_delta = 0;
6741 }
6742 hwrm_req_drop(bp, req);
6743 }
6744
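/* Configure RX buffer placement for the VNIC: always program the jumbo
 * threshold, and enable IPv4/IPv6 header-data split with the configured
 * threshold when aggregation rings are in use and not in page mode.
 */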
6745 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6746 {
6747 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6748 struct hwrm_vnic_plcmodes_cfg_input *req;
6749 int rc;
6750
6751 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6752 if (rc)
6753 return rc;
6754
6755 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6756 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6757 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6758
6759 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6760 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6761 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6762 req->enables |=
6763 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6764 req->hds_threshold = cpu_to_le16(hds_thresh);
6765 }
6766 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6767 return hwrm_req_send(bp, req);
6768 }
6769
6770 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6771 struct bnxt_vnic_info *vnic,
6772 u16 ctx_idx)
6773 {
6774 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6775
6776 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6777 return;
6778
6779 req->rss_cos_lb_ctx_id =
6780 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6781
6782 hwrm_req_send(bp, req);
6783 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6784 }
6785
6786 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6787 {
6788 int i, j;
6789
6790 for (i = 0; i < bp->nr_vnics; i++) {
6791 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6792
6793 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6794 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6795 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6796 }
6797 }
6798 bp->rsscos_nr_ctxs = 0;
6799 }
6800
6801 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6802 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6803 {
6804 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6805 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6806 int rc;
6807
6808 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6809 if (rc)
6810 return rc;
6811
6812 resp = hwrm_req_hold(bp, req);
6813 rc = hwrm_req_send(bp, req);
6814 if (!rc)
6815 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6816 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6817 hwrm_req_drop(bp, req);
6818
6819 return rc;
6820 }
6821
6822 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6823 {
6824 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6825 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6826 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6827 }
6828
6829 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6830 {
6831 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6832 struct hwrm_vnic_cfg_input *req;
6833 unsigned int ring = 0, grp_idx;
6834 u16 def_vlan = 0;
6835 int rc;
6836
6837 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6838 if (rc)
6839 return rc;
6840
6841 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6842 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6843
6844 req->default_rx_ring_id =
6845 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6846 req->default_cmpl_ring_id =
6847 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6848 req->enables =
6849 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6850 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6851 goto vnic_mru;
6852 }
6853 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6854 	/* Only RSS is supported for now; COS & LB are TBD */
6855 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6856 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6857 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6858 VNIC_CFG_REQ_ENABLES_MRU);
6859 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6860 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6861 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6862 VNIC_CFG_REQ_ENABLES_MRU);
6863 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6864 } else {
6865 req->rss_rule = cpu_to_le16(0xffff);
6866 }
6867
6868 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6869 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6870 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6871 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6872 } else {
6873 req->cos_rule = cpu_to_le16(0xffff);
6874 }
6875
6876 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6877 ring = 0;
6878 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6879 ring = vnic->vnic_id - 1;
6880 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6881 ring = bp->rx_nr_rings - 1;
6882
6883 grp_idx = bp->rx_ring[ring].bnapi->index;
6884 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6885 req->lb_rule = cpu_to_le16(0xffff);
6886 vnic_mru:
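	/* The MRU accounts for the Ethernet header and one VLAN tag on top
	 * of the MTU (VLAN_ETH_HLEN).
	 */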
6887 vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6888 req->mru = cpu_to_le16(vnic->mru);
6889
6890 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6891 #ifdef CONFIG_BNXT_SRIOV
6892 if (BNXT_VF(bp))
6893 def_vlan = bp->vf.vlan;
6894 #endif
6895 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6896 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6897 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6898 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6899
6900 return hwrm_req_send(bp, req);
6901 }
6902
6903 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6904 struct bnxt_vnic_info *vnic)
6905 {
6906 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6907 struct hwrm_vnic_free_input *req;
6908
6909 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6910 return;
6911
6912 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6913
6914 hwrm_req_send(bp, req);
6915 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6916 }
6917 }
6918
6919 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6920 {
6921 u16 i;
6922
6923 for (i = 0; i < bp->nr_vnics; i++)
6924 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6925 }
6926
6927 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6928 unsigned int start_rx_ring_idx,
6929 unsigned int nr_rings)
6930 {
6931 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6932 struct hwrm_vnic_alloc_output *resp;
6933 struct hwrm_vnic_alloc_input *req;
6934 int rc;
6935
6936 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6937 if (rc)
6938 return rc;
6939
6940 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6941 goto vnic_no_ring_grps;
6942
6943 /* map ring groups to this vnic */
6944 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6945 grp_idx = bp->rx_ring[i].bnapi->index;
6946 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6947 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6948 j, nr_rings);
6949 break;
6950 }
6951 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6952 }
6953
6954 vnic_no_ring_grps:
6955 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6956 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6957 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6958 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6959
6960 resp = hwrm_req_hold(bp, req);
6961 rc = hwrm_req_send(bp, req);
6962 if (!rc)
6963 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6964 hwrm_req_drop(bp, req);
6965 return rc;
6966 }
6967
6968 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6969 {
6970 struct hwrm_vnic_qcaps_output *resp;
6971 struct hwrm_vnic_qcaps_input *req;
6972 int rc;
6973
6974 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6975 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6976 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6977 if (bp->hwrm_spec_code < 0x10600)
6978 return 0;
6979
6980 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6981 if (rc)
6982 return rc;
6983
6984 resp = hwrm_req_hold(bp, req);
6985 rc = hwrm_req_send(bp, req);
6986 if (!rc) {
6987 u32 flags = le32_to_cpu(resp->flags);
6988
6989 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6990 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6991 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6992 if (flags &
6993 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6994 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6995
6996 /* Older P5 fw before EXT_HW_STATS support did not set
6997 * VLAN_STRIP_CAP properly.
6998 */
6999 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7000 (BNXT_CHIP_P5(bp) &&
7001 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7002 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7003 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7004 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7005 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7006 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7007 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7008 if (bp->max_tpa_v2) {
7009 if (BNXT_CHIP_P5(bp))
7010 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7011 else
7012 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7013 }
7014 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7015 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7016 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7017 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7018 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7019 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7020 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7021 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7022 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7023 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7024 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7025 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7026 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7027 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7028 }
7029 hwrm_req_drop(bp, req);
7030 return rc;
7031 }
7032
7033 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7034 {
7035 struct hwrm_ring_grp_alloc_output *resp;
7036 struct hwrm_ring_grp_alloc_input *req;
7037 int rc;
7038 u16 i;
7039
7040 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7041 return 0;
7042
7043 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7044 if (rc)
7045 return rc;
7046
7047 resp = hwrm_req_hold(bp, req);
7048 for (i = 0; i < bp->rx_nr_rings; i++) {
7049 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7050
7051 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7052 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7053 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7054 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7055
7056 rc = hwrm_req_send(bp, req);
7057
7058 if (rc)
7059 break;
7060
7061 bp->grp_info[grp_idx].fw_grp_id =
7062 le32_to_cpu(resp->ring_group_id);
7063 }
7064 hwrm_req_drop(bp, req);
7065 return rc;
7066 }
7067
7068 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7069 {
7070 struct hwrm_ring_grp_free_input *req;
7071 u16 i;
7072
7073 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7074 return;
7075
7076 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7077 return;
7078
7079 hwrm_req_hold(bp, req);
7080 for (i = 0; i < bp->cp_nr_rings; i++) {
7081 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7082 continue;
7083 req->ring_group_id =
7084 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7085
7086 hwrm_req_send(bp, req);
7087 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7088 }
7089 hwrm_req_drop(bp, req);
7090 }
7091
7092 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7093 struct hwrm_ring_alloc_input *req,
7094 struct bnxt_rx_ring_info *rxr,
7095 struct bnxt_ring_struct *ring)
7096 {
7097 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7098 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7099 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7100
7101 if (ring_type == HWRM_RING_ALLOC_AGG) {
7102 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7103 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7104 req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7105 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7106 } else {
7107 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7108 if (NET_IP_ALIGN == 2)
7109 req->flags =
7110 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7111 }
7112 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7113 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7114 req->enables |= cpu_to_le32(enables);
7115 }
7116
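/* Build and send a HWRM_RING_ALLOC request for one ring.  The ring_type
 * selects the TX, RX, RX aggregation, L2 completion or NQ specific fields;
 * on success the firmware ring ID is stored in ring->fw_ring_id.
 */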
7117 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7118 struct bnxt_rx_ring_info *rxr,
7119 struct bnxt_ring_struct *ring,
7120 u32 ring_type, u32 map_index)
7121 {
7122 struct hwrm_ring_alloc_output *resp;
7123 struct hwrm_ring_alloc_input *req;
7124 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7125 struct bnxt_ring_grp_info *grp_info;
7126 int rc, err = 0;
7127 u16 ring_id;
7128
7129 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7130 if (rc)
7131 goto exit;
7132
7133 req->enables = 0;
7134 if (rmem->nr_pages > 1) {
7135 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7136 /* Page size is in log2 units */
7137 req->page_size = BNXT_PAGE_SHIFT;
7138 req->page_tbl_depth = 1;
7139 } else {
7140 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7141 }
7142 req->fbo = 0;
7143 /* Association of ring index with doorbell index and MSIX number */
7144 req->logical_id = cpu_to_le16(map_index);
7145
7146 switch (ring_type) {
7147 case HWRM_RING_ALLOC_TX: {
7148 struct bnxt_tx_ring_info *txr;
7149 u16 flags = 0;
7150
7151 txr = container_of(ring, struct bnxt_tx_ring_info,
7152 tx_ring_struct);
7153 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7154 /* Association of transmit ring with completion ring */
7155 grp_info = &bp->grp_info[ring->grp_idx];
7156 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7157 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7158 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7159 req->queue_id = cpu_to_le16(ring->queue_id);
7160 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7161 req->cmpl_coal_cnt =
7162 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7163 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7164 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7165 req->flags = cpu_to_le16(flags);
7166 break;
7167 }
7168 case HWRM_RING_ALLOC_RX:
7169 case HWRM_RING_ALLOC_AGG:
7170 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7171 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7172 cpu_to_le32(bp->rx_ring_mask + 1) :
7173 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7174 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7175 bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7176 rxr, ring);
7177 break;
7178 case HWRM_RING_ALLOC_CMPL:
7179 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7180 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7181 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7182 /* Association of cp ring with nq */
7183 grp_info = &bp->grp_info[map_index];
7184 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7185 req->cq_handle = cpu_to_le64(ring->handle);
7186 req->enables |= cpu_to_le32(
7187 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7188 } else {
7189 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7190 }
7191 break;
7192 case HWRM_RING_ALLOC_NQ:
7193 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7194 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7195 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7196 break;
7197 default:
7198 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7199 ring_type);
7200 return -EINVAL;
7201 }
7202
7203 resp = hwrm_req_hold(bp, req);
7204 rc = hwrm_req_send(bp, req);
7205 err = le16_to_cpu(resp->error_code);
7206 ring_id = le16_to_cpu(resp->ring_id);
7207 hwrm_req_drop(bp, req);
7208
7209 exit:
7210 if (rc || err) {
7211 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7212 ring_type, rc, err);
7213 return -EIO;
7214 }
7215 ring->fw_ring_id = ring_id;
7216 return rc;
7217 }
7218
7219 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7220 {
7221 int rc;
7222
7223 if (BNXT_PF(bp)) {
7224 struct hwrm_func_cfg_input *req;
7225
7226 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7227 if (rc)
7228 return rc;
7229
7230 req->fid = cpu_to_le16(0xffff);
7231 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7232 req->async_event_cr = cpu_to_le16(idx);
7233 return hwrm_req_send(bp, req);
7234 } else {
7235 struct hwrm_func_vf_cfg_input *req;
7236
7237 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7238 if (rc)
7239 return rc;
7240
7241 req->enables =
7242 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7243 req->async_event_cr = cpu_to_le16(idx);
7244 return hwrm_req_send(bp, req);
7245 }
7246 }
7247
7248 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7249 u32 ring_type)
7250 {
7251 switch (ring_type) {
7252 case HWRM_RING_ALLOC_TX:
7253 db->db_ring_mask = bp->tx_ring_mask;
7254 break;
7255 case HWRM_RING_ALLOC_RX:
7256 db->db_ring_mask = bp->rx_ring_mask;
7257 break;
7258 case HWRM_RING_ALLOC_AGG:
7259 db->db_ring_mask = bp->rx_agg_ring_mask;
7260 break;
7261 case HWRM_RING_ALLOC_CMPL:
7262 case HWRM_RING_ALLOC_NQ:
7263 db->db_ring_mask = bp->cp_ring_mask;
7264 break;
7265 }
7266 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7267 db->db_epoch_mask = db->db_ring_mask + 1;
7268 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7269 }
7270 }
7271
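/* Initialize the doorbell descriptor for a newly allocated ring.  P5_PLUS
 * chips use 64-bit keyed doorbells at bp->db_offset with the ring XID
 * encoded in the key; legacy chips use per-ring 32-bit doorbells spaced
 * 0x80 apart in BAR 1.
 */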
7272 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7273 u32 map_idx, u32 xid)
7274 {
7275 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7276 switch (ring_type) {
7277 case HWRM_RING_ALLOC_TX:
7278 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7279 break;
7280 case HWRM_RING_ALLOC_RX:
7281 case HWRM_RING_ALLOC_AGG:
7282 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7283 break;
7284 case HWRM_RING_ALLOC_CMPL:
7285 db->db_key64 = DBR_PATH_L2;
7286 break;
7287 case HWRM_RING_ALLOC_NQ:
7288 db->db_key64 = DBR_PATH_L2;
7289 break;
7290 }
7291 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7292
7293 if (bp->flags & BNXT_FLAG_CHIP_P7)
7294 db->db_key64 |= DBR_VALID;
7295
7296 db->doorbell = bp->bar1 + bp->db_offset;
7297 } else {
7298 db->doorbell = bp->bar1 + map_idx * 0x80;
7299 switch (ring_type) {
7300 case HWRM_RING_ALLOC_TX:
7301 db->db_key32 = DB_KEY_TX;
7302 break;
7303 case HWRM_RING_ALLOC_RX:
7304 case HWRM_RING_ALLOC_AGG:
7305 db->db_key32 = DB_KEY_RX;
7306 break;
7307 case HWRM_RING_ALLOC_CMPL:
7308 db->db_key32 = DB_KEY_CP;
7309 break;
7310 }
7311 }
7312 bnxt_set_db_mask(bp, db, ring_type);
7313 }
7314
7315 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7316 struct bnxt_rx_ring_info *rxr)
7317 {
7318 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7319 struct bnxt_napi *bnapi = rxr->bnapi;
7320 u32 type = HWRM_RING_ALLOC_RX;
7321 u32 map_idx = bnapi->index;
7322 int rc;
7323
7324 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7325 if (rc)
7326 return rc;
7327
7328 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7329 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7330
7331 return 0;
7332 }
7333
7334 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7335 struct bnxt_rx_ring_info *rxr)
7336 {
7337 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7338 u32 type = HWRM_RING_ALLOC_AGG;
7339 u32 grp_idx = ring->grp_idx;
7340 u32 map_idx;
7341 int rc;
7342
7343 map_idx = grp_idx + bp->rx_nr_rings;
7344 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7345 if (rc)
7346 return rc;
7347
7348 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7349 ring->fw_ring_id);
7350 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7351 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7352 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7353
7354 return 0;
7355 }
7356
7357 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7358 struct bnxt_cp_ring_info *cpr)
7359 {
7360 const u32 type = HWRM_RING_ALLOC_CMPL;
7361 struct bnxt_napi *bnapi = cpr->bnapi;
7362 struct bnxt_ring_struct *ring;
7363 u32 map_idx = bnapi->index;
7364 int rc;
7365
7366 ring = &cpr->cp_ring_struct;
7367 ring->handle = BNXT_SET_NQ_HDL(cpr);
7368 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7369 if (rc)
7370 return rc;
7371 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7372 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7373 return 0;
7374 }
7375
7376 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7377 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7378 {
7379 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7380 const u32 type = HWRM_RING_ALLOC_TX;
7381 int rc;
7382
7383 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7384 if (rc)
7385 return rc;
7386 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7387 return 0;
7388 }
7389
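/* Allocate all firmware rings in dependency order: NQ/completion rings
 * first (registering the async event completion ring on ring 0), then TX
 * rings with their per-ring completion rings on P5_PLUS, then RX rings
 * and finally the aggregation rings.
 */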
7390 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7391 {
7392 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7393 int i, rc = 0;
7394 u32 type;
7395
7396 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7397 type = HWRM_RING_ALLOC_NQ;
7398 else
7399 type = HWRM_RING_ALLOC_CMPL;
7400 for (i = 0; i < bp->cp_nr_rings; i++) {
7401 struct bnxt_napi *bnapi = bp->bnapi[i];
7402 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7403 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7404 u32 map_idx = ring->map_idx;
7405 unsigned int vector;
7406
7407 vector = bp->irq_tbl[map_idx].vector;
7408 disable_irq_nosync(vector);
7409 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7410 if (rc) {
7411 enable_irq(vector);
7412 goto err_out;
7413 }
7414 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7415 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7416 enable_irq(vector);
7417 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7418
7419 if (!i) {
7420 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7421 if (rc)
7422 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7423 }
7424 }
7425
7426 for (i = 0; i < bp->tx_nr_rings; i++) {
7427 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7428
7429 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7430 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7431 if (rc)
7432 goto err_out;
7433 }
7434 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7435 if (rc)
7436 goto err_out;
7437 }
7438
7439 for (i = 0; i < bp->rx_nr_rings; i++) {
7440 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7441
7442 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7443 if (rc)
7444 goto err_out;
7445 /* If we have agg rings, post agg buffers first. */
7446 if (!agg_rings)
7447 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7448 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7449 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7450 if (rc)
7451 goto err_out;
7452 }
7453 }
7454
7455 if (agg_rings) {
7456 for (i = 0; i < bp->rx_nr_rings; i++) {
7457 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7458 if (rc)
7459 goto err_out;
7460 }
7461 }
7462 err_out:
7463 return rc;
7464 }
7465
7466 static void bnxt_cancel_dim(struct bnxt *bp)
7467 {
7468 int i;
7469
7470 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7471 * if NAPI is enabled.
7472 */
7473 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7474 return;
7475
7476 /* Make sure NAPI sees that the VNIC is disabled */
7477 synchronize_net();
7478 for (i = 0; i < bp->rx_nr_rings; i++) {
7479 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7480 struct bnxt_napi *bnapi = rxr->bnapi;
7481
7482 cancel_work_sync(&bnapi->cp_ring.dim.work);
7483 }
7484 }
7485
7486 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7487 struct bnxt_ring_struct *ring,
7488 u32 ring_type, int cmpl_ring_id)
7489 {
7490 struct hwrm_ring_free_output *resp;
7491 struct hwrm_ring_free_input *req;
7492 u16 error_code = 0;
7493 int rc;
7494
7495 if (BNXT_NO_FW_ACCESS(bp))
7496 return 0;
7497
7498 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7499 if (rc)
7500 goto exit;
7501
7502 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7503 req->ring_type = ring_type;
7504 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7505
7506 resp = hwrm_req_hold(bp, req);
7507 rc = hwrm_req_send(bp, req);
7508 error_code = le16_to_cpu(resp->error_code);
7509 hwrm_req_drop(bp, req);
7510 exit:
7511 if (rc || error_code) {
7512 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7513 ring_type, rc, error_code);
7514 return -EIO;
7515 }
7516 return 0;
7517 }
7518
7519 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7520 struct bnxt_tx_ring_info *txr,
7521 bool close_path)
7522 {
7523 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7524 u32 cmpl_ring_id;
7525
7526 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7527 return;
7528
7529 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7530 INVALID_HW_RING_ID;
7531 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7532 cmpl_ring_id);
7533 ring->fw_ring_id = INVALID_HW_RING_ID;
7534 }
7535
7536 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7537 struct bnxt_rx_ring_info *rxr,
7538 bool close_path)
7539 {
7540 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7541 u32 grp_idx = rxr->bnapi->index;
7542 u32 cmpl_ring_id;
7543
7544 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7545 return;
7546
7547 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7548 hwrm_ring_free_send_msg(bp, ring,
7549 RING_FREE_REQ_RING_TYPE_RX,
7550 close_path ? cmpl_ring_id :
7551 INVALID_HW_RING_ID);
7552 ring->fw_ring_id = INVALID_HW_RING_ID;
7553 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7554 }
7555
7556 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7557 struct bnxt_rx_ring_info *rxr,
7558 bool close_path)
7559 {
7560 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7561 u32 grp_idx = rxr->bnapi->index;
7562 u32 type, cmpl_ring_id;
7563
7564 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7565 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7566 else
7567 type = RING_FREE_REQ_RING_TYPE_RX;
7568
7569 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7570 return;
7571
7572 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7573 hwrm_ring_free_send_msg(bp, ring, type,
7574 close_path ? cmpl_ring_id :
7575 INVALID_HW_RING_ID);
7576 ring->fw_ring_id = INVALID_HW_RING_ID;
7577 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7578 }
7579
7580 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7581 struct bnxt_cp_ring_info *cpr)
7582 {
7583 struct bnxt_ring_struct *ring;
7584
7585 ring = &cpr->cp_ring_struct;
7586 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7587 return;
7588
7589 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7590 INVALID_HW_RING_ID);
7591 ring->fw_ring_id = INVALID_HW_RING_ID;
7592 }
7593
7594 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7595 {
7596 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7597 int i, size = ring->ring_mem.page_size;
7598
7599 cpr->cp_raw_cons = 0;
7600 cpr->toggle = 0;
7601
7602 for (i = 0; i < bp->cp_nr_pages; i++)
7603 if (cpr->cp_desc_ring[i])
7604 memset(cpr->cp_desc_ring[i], 0, size);
7605 }
7606
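/* Free all firmware rings in the reverse order of allocation: TX rings,
 * then RX and aggregation rings, and finally the completion/NQ rings
 * after interrupts have been disabled.
 */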
7607 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7608 {
7609 u32 type;
7610 int i;
7611
7612 if (!bp->bnapi)
7613 return;
7614
7615 for (i = 0; i < bp->tx_nr_rings; i++)
7616 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7617
7618 bnxt_cancel_dim(bp);
7619 for (i = 0; i < bp->rx_nr_rings; i++) {
7620 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7621 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7622 }
7623
7624 /* The completion rings are about to be freed. After that the
7625 * IRQ doorbell will not work anymore. So we need to disable
7626 * IRQ here.
7627 */
7628 bnxt_disable_int_sync(bp);
7629
7630 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7631 type = RING_FREE_REQ_RING_TYPE_NQ;
7632 else
7633 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7634 for (i = 0; i < bp->cp_nr_rings; i++) {
7635 struct bnxt_napi *bnapi = bp->bnapi[i];
7636 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7637 struct bnxt_ring_struct *ring;
7638 int j;
7639
7640 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7641 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7642
7643 ring = &cpr->cp_ring_struct;
7644 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7645 hwrm_ring_free_send_msg(bp, ring, type,
7646 INVALID_HW_RING_ID);
7647 ring->fw_ring_id = INVALID_HW_RING_ID;
7648 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7649 }
7650 }
7651 }
7652
7653 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7654 bool shared);
7655 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7656 bool shared);
7657
7658 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7659 {
7660 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7661 struct hwrm_func_qcfg_output *resp;
7662 struct hwrm_func_qcfg_input *req;
7663 int rc;
7664
7665 if (bp->hwrm_spec_code < 0x10601)
7666 return 0;
7667
7668 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7669 if (rc)
7670 return rc;
7671
7672 req->fid = cpu_to_le16(0xffff);
7673 resp = hwrm_req_hold(bp, req);
7674 rc = hwrm_req_send(bp, req);
7675 if (rc) {
7676 hwrm_req_drop(bp, req);
7677 return rc;
7678 }
7679
7680 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7681 if (BNXT_NEW_RM(bp)) {
7682 u16 cp, stats;
7683
7684 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7685 hw_resc->resv_hw_ring_grps =
7686 le32_to_cpu(resp->alloc_hw_ring_grps);
7687 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7688 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7689 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7690 stats = le16_to_cpu(resp->alloc_stat_ctx);
7691 hw_resc->resv_irqs = cp;
7692 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7693 int rx = hw_resc->resv_rx_rings;
7694 int tx = hw_resc->resv_tx_rings;
7695
7696 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7697 rx >>= 1;
7698 if (cp < (rx + tx)) {
7699 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7700 if (rc)
7701 goto get_rings_exit;
7702 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7703 rx <<= 1;
7704 hw_resc->resv_rx_rings = rx;
7705 hw_resc->resv_tx_rings = tx;
7706 }
7707 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7708 hw_resc->resv_hw_ring_grps = rx;
7709 }
7710 hw_resc->resv_cp_rings = cp;
7711 hw_resc->resv_stat_ctxs = stats;
7712 }
7713 get_rings_exit:
7714 hwrm_req_drop(bp, req);
7715 return rc;
7716 }
7717
7718 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7719 {
7720 struct hwrm_func_qcfg_output *resp;
7721 struct hwrm_func_qcfg_input *req;
7722 int rc;
7723
7724 if (bp->hwrm_spec_code < 0x10601)
7725 return 0;
7726
7727 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7728 if (rc)
7729 return rc;
7730
7731 req->fid = cpu_to_le16(fid);
7732 resp = hwrm_req_hold(bp, req);
7733 rc = hwrm_req_send(bp, req);
7734 if (!rc)
7735 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7736
7737 hwrm_req_drop(bp, req);
7738 return rc;
7739 }
7740
7741 static bool bnxt_rfs_supported(struct bnxt *bp);
7742
7743 static struct hwrm_func_cfg_input *
7744 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7745 {
7746 struct hwrm_func_cfg_input *req;
7747 u32 enables = 0;
7748
7749 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7750 return NULL;
7751
7752 req->fid = cpu_to_le16(0xffff);
7753 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7754 req->num_tx_rings = cpu_to_le16(hwr->tx);
7755 if (BNXT_NEW_RM(bp)) {
7756 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7757 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7758 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7759 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7760 enables |= hwr->cp_p5 ?
7761 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7762 } else {
7763 enables |= hwr->cp ?
7764 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7765 enables |= hwr->grp ?
7766 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7767 }
7768 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7769 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7770 0;
7771 req->num_rx_rings = cpu_to_le16(hwr->rx);
7772 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7773 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7774 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7775 req->num_msix = cpu_to_le16(hwr->cp);
7776 } else {
7777 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7778 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7779 }
7780 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7781 req->num_vnics = cpu_to_le16(hwr->vnic);
7782 }
7783 req->enables = cpu_to_le32(enables);
7784 return req;
7785 }
7786
7787 static struct hwrm_func_vf_cfg_input *
7788 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7789 {
7790 struct hwrm_func_vf_cfg_input *req;
7791 u32 enables = 0;
7792
7793 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7794 return NULL;
7795
7796 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7797 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7798 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7799 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7800 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7801 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7802 enables |= hwr->cp_p5 ?
7803 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7804 } else {
7805 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7806 enables |= hwr->grp ?
7807 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7808 }
7809 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7810 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7811
7812 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7813 req->num_tx_rings = cpu_to_le16(hwr->tx);
7814 req->num_rx_rings = cpu_to_le16(hwr->rx);
7815 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7816 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7817 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7818 } else {
7819 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7820 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7821 }
7822 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7823 req->num_vnics = cpu_to_le16(hwr->vnic);
7824
7825 req->enables = cpu_to_le32(enables);
7826 return req;
7827 }
7828
7829 static int
7830 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7831 {
7832 struct hwrm_func_cfg_input *req;
7833 int rc;
7834
7835 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7836 if (!req)
7837 return -ENOMEM;
7838
7839 if (!req->enables) {
7840 hwrm_req_drop(bp, req);
7841 return 0;
7842 }
7843
7844 rc = hwrm_req_send(bp, req);
7845 if (rc)
7846 return rc;
7847
7848 if (bp->hwrm_spec_code < 0x10601)
7849 bp->hw_resc.resv_tx_rings = hwr->tx;
7850
7851 return bnxt_hwrm_get_rings(bp);
7852 }
7853
7854 static int
7855 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7856 {
7857 struct hwrm_func_vf_cfg_input *req;
7858 int rc;
7859
7860 if (!BNXT_NEW_RM(bp)) {
7861 bp->hw_resc.resv_tx_rings = hwr->tx;
7862 return 0;
7863 }
7864
7865 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7866 if (!req)
7867 return -ENOMEM;
7868
7869 rc = hwrm_req_send(bp, req);
7870 if (rc)
7871 return rc;
7872
7873 return bnxt_hwrm_get_rings(bp);
7874 }
7875
7876 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7877 {
7878 if (BNXT_PF(bp))
7879 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7880 else
7881 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7882 }
7883
7884 int bnxt_nq_rings_in_use(struct bnxt *bp)
7885 {
7886 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7887 }
7888
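/* On P5_PLUS chips each TX and RX ring has its own completion ring, so
 * the completion ring count is tx + rx.  Older chips use one completion
 * ring per MSI-X vector, so the NQ count is returned instead.
 */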
7889 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7890 {
7891 int cp;
7892
7893 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7894 return bnxt_nq_rings_in_use(bp);
7895
7896 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7897 return cp;
7898 }
7899
7900 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7901 {
7902 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7903 }
7904
7905 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7906 {
7907 if (!hwr->grp)
7908 return 0;
7909 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7910 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7911
7912 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7913 rss_ctx *= hwr->vnic;
7914 return rss_ctx;
7915 }
7916 if (BNXT_VF(bp))
7917 return BNXT_VF_MAX_RSS_CTX;
7918 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7919 return hwr->grp + 1;
7920 return 1;
7921 }
7922
7923 /* Check if a default RSS map needs to be set up. This function is only
7924 * used on older firmware that does not require reserving RX rings.
7925 */
7926 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7927 {
7928 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7929
7930 /* The RSS map is valid for RX rings set to resv_rx_rings */
7931 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7932 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7933 if (!netif_is_rxfh_configured(bp->dev))
7934 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7935 }
7936 }
7937
7938 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7939 {
7940 if (bp->flags & BNXT_FLAG_RFS) {
7941 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7942 return 2 + bp->num_rss_ctx;
7943 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7944 return rx_rings + 1;
7945 }
7946 return 1;
7947 }
7948
7949 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7950 {
7951 hwr->cp = bnxt_nq_rings_in_use(bp);
7952 hwr->cp_p5 = 0;
7953 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7954 hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
7955 hwr->tx = bp->tx_nr_rings;
7956 hwr->rx = bp->rx_nr_rings;
7957 hwr->grp = hwr->rx;
7958 hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
7959 hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
7960 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7961 hwr->rx <<= 1;
7962 hwr->stat = bnxt_get_func_stat_ctxs(bp);
7963 }
7964
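/* Return true if the currently reserved firmware resources no longer
 * match what the current ring configuration needs, i.e. a new
 * reservation via __bnxt_reserve_rings() is required.
 */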
7965 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7966 {
7967 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7968 struct bnxt_hw_rings hwr;
7969
7970 bnxt_get_total_resources(bp, &hwr);
7971
7972 /* Old firmware does not need RX ring reservations but we still
7973 * need to setup a default RSS map when needed. With new firmware
7974 * we go through RX ring reservations first and then set up the
7975 * RSS map for the successfully reserved RX rings when needed.
7976 */
7977 if (!BNXT_NEW_RM(bp))
7978 bnxt_check_rss_tbl_no_rmgr(bp);
7979
7980 if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
7981 return true;
7982
7983 if (!BNXT_NEW_RM(bp))
7984 return false;
7985
7986 if (hw_resc->resv_rx_rings != hwr.rx ||
7987 hw_resc->resv_vnics != hwr.vnic ||
7988 hw_resc->resv_stat_ctxs != hwr.stat ||
7989 hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
7990 (hw_resc->resv_hw_ring_grps != hwr.grp &&
7991 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7992 return true;
7993 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7994 if (hw_resc->resv_cp_rings != hwr.cp_p5)
7995 return true;
7996 } else if (hw_resc->resv_cp_rings != hwr.cp) {
7997 return true;
7998 }
7999 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
8000 hw_resc->resv_irqs != hwr.cp)
8001 return true;
8002 return false;
8003 }
8004
8005 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8006 {
8007 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8008
8009 hwr->tx = hw_resc->resv_tx_rings;
8010 if (BNXT_NEW_RM(bp)) {
8011 hwr->rx = hw_resc->resv_rx_rings;
8012 hwr->cp = hw_resc->resv_irqs;
8013 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8014 hwr->cp_p5 = hw_resc->resv_cp_rings;
8015 hwr->grp = hw_resc->resv_hw_ring_grps;
8016 hwr->vnic = hw_resc->resv_vnics;
8017 hwr->stat = hw_resc->resv_stat_ctxs;
8018 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8019 }
8020 }
8021
8022 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8023 {
8024 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8025 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8026 }
8027
8028 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8029
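/* Reserve firmware resources (rings, stats contexts, VNICs, MSI-X) to
 * match the current ring configuration, then trim the driver's ring
 * counts down to what the firmware actually granted.  The default RSS
 * indirection table is rebuilt only if the reserved RX ring count
 * changed and the user has not configured a custom table.
 */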
8030 static int __bnxt_reserve_rings(struct bnxt *bp)
8031 {
8032 struct bnxt_hw_rings hwr = {0};
8033 int rx_rings, old_rx_rings, rc;
8034 int cp = bp->cp_nr_rings;
8035 int ulp_msix = 0;
8036 bool sh = false;
8037 int tx_cp;
8038
8039 if (!bnxt_need_reserve_rings(bp))
8040 return 0;
8041
8042 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
8043 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8044 if (!ulp_msix)
8045 bnxt_set_ulp_stat_ctxs(bp, 0);
8046
8047 if (ulp_msix > bp->ulp_num_msix_want)
8048 ulp_msix = bp->ulp_num_msix_want;
8049 hwr.cp = cp + ulp_msix;
8050 } else {
8051 hwr.cp = bnxt_nq_rings_in_use(bp);
8052 }
8053
8054 hwr.tx = bp->tx_nr_rings;
8055 hwr.rx = bp->rx_nr_rings;
8056 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8057 sh = true;
8058 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8059 hwr.cp_p5 = hwr.rx + hwr.tx;
8060
8061 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8062
8063 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8064 hwr.rx <<= 1;
8065 hwr.grp = bp->rx_nr_rings;
8066 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8067 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8068 old_rx_rings = bp->hw_resc.resv_rx_rings;
8069
8070 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8071 if (rc)
8072 return rc;
8073
8074 bnxt_copy_reserved_rings(bp, &hwr);
8075
8076 rx_rings = hwr.rx;
8077 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8078 if (hwr.rx >= 2) {
8079 rx_rings = hwr.rx >> 1;
8080 } else {
8081 if (netif_running(bp->dev))
8082 return -ENOMEM;
8083
8084 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8085 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8086 bp->dev->hw_features &= ~NETIF_F_LRO;
8087 bp->dev->features &= ~NETIF_F_LRO;
8088 bnxt_set_ring_params(bp);
8089 }
8090 }
8091 rx_rings = min_t(int, rx_rings, hwr.grp);
8092 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8093 if (bnxt_ulp_registered(bp->edev) &&
8094 hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8095 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8096 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8097 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8098 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8099 hwr.rx = rx_rings << 1;
8100 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8101 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8102 if (hwr.tx != bp->tx_nr_rings) {
8103 netdev_warn(bp->dev,
8104 "Able to reserve only %d out of %d requested TX rings\n",
8105 hwr.tx, bp->tx_nr_rings);
8106 }
8107 bp->tx_nr_rings = hwr.tx;
8108
8109 /* If we cannot reserve all the RX rings, reset the RSS map only
8110 * if absolutely necessary
8111 */
8112 if (rx_rings != bp->rx_nr_rings) {
8113 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8114 rx_rings, bp->rx_nr_rings);
8115 if (netif_is_rxfh_configured(bp->dev) &&
8116 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8117 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8118 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8119 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8120 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8121 }
8122 }
8123 bp->rx_nr_rings = rx_rings;
8124 bp->cp_nr_rings = hwr.cp;
8125
8126 /* Fall back if we cannot reserve enough HW RSS contexts */
8127 if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8128 hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8129 bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8130
8131 if (!bnxt_rings_ok(bp, &hwr))
8132 return -ENOMEM;
8133
8134 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8135 !netif_is_rxfh_configured(bp->dev))
8136 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8137
8138 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8139 int resv_msix, resv_ctx, ulp_ctxs;
8140 struct bnxt_hw_resc *hw_resc;
8141
8142 hw_resc = &bp->hw_resc;
8143 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8144 ulp_msix = min_t(int, resv_msix, ulp_msix);
8145 bnxt_set_ulp_msix_num(bp, ulp_msix);
8146 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8147 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8148 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8149 }
8150
8151 return rc;
8152 }
8153
8154 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8155 {
8156 struct hwrm_func_vf_cfg_input *req;
8157 u32 flags;
8158
8159 if (!BNXT_NEW_RM(bp))
8160 return 0;
8161
8162 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8163 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8164 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8165 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8166 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8167 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8168 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8169 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8170 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8171
8172 req->flags = cpu_to_le32(flags);
8173 return hwrm_req_send_silent(bp, req);
8174 }
8175
8176 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8177 {
8178 struct hwrm_func_cfg_input *req;
8179 u32 flags;
8180
8181 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8182 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8183 if (BNXT_NEW_RM(bp)) {
8184 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8185 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8186 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8187 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8188 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8189 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8190 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8191 else
8192 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8193 }
8194
8195 req->flags = cpu_to_le32(flags);
8196 return hwrm_req_send_silent(bp, req);
8197 }
8198
8199 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8200 {
8201 if (bp->hwrm_spec_code < 0x10801)
8202 return 0;
8203
8204 if (BNXT_PF(bp))
8205 return bnxt_hwrm_check_pf_rings(bp, hwr);
8206
8207 return bnxt_hwrm_check_vf_rings(bp, hwr);
8208 }
8209
8210 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8211 {
8212 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8213 struct hwrm_ring_aggint_qcaps_output *resp;
8214 struct hwrm_ring_aggint_qcaps_input *req;
8215 int rc;
8216
8217 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8218 coal_cap->num_cmpl_dma_aggr_max = 63;
8219 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8220 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8221 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8222 coal_cap->int_lat_tmr_min_max = 65535;
8223 coal_cap->int_lat_tmr_max_max = 65535;
8224 coal_cap->num_cmpl_aggr_int_max = 65535;
8225 coal_cap->timer_units = 80;
8226
8227 if (bp->hwrm_spec_code < 0x10902)
8228 return;
8229
8230 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8231 return;
8232
8233 resp = hwrm_req_hold(bp, req);
8234 rc = hwrm_req_send_silent(bp, req);
8235 if (!rc) {
8236 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8237 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8238 coal_cap->num_cmpl_dma_aggr_max =
8239 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8240 coal_cap->num_cmpl_dma_aggr_during_int_max =
8241 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8242 coal_cap->cmpl_aggr_dma_tmr_max =
8243 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8244 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8245 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8246 coal_cap->int_lat_tmr_min_max =
8247 le16_to_cpu(resp->int_lat_tmr_min_max);
8248 coal_cap->int_lat_tmr_max_max =
8249 le16_to_cpu(resp->int_lat_tmr_max_max);
8250 coal_cap->num_cmpl_aggr_int_max =
8251 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8252 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8253 }
8254 hwrm_req_drop(bp, req);
8255 }
8256
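/* Convert microseconds to firmware coalescing timer units.  With the
 * default 80 ns timer_units, for example, 100 usec converts to
 * 100 * 1000 / 80 = 1250 units.
 */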
8257 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8258 {
8259 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8260
8261 return usec * 1000 / coal_cap->timer_units;
8262 }
8263
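/* Translate the driver's coalescing settings (hw_coal) into a
 * HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request: buffer counts are
 * clamped to the queried capabilities, the min timer is set to half of
 * the interrupt timer and the DMA buffer timer to a quarter of it.
 */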
8264 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8265 struct bnxt_coal *hw_coal,
8266 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8267 {
8268 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8269 u16 val, tmr, max, flags = hw_coal->flags;
8270 u32 cmpl_params = coal_cap->cmpl_params;
8271
8272 max = hw_coal->bufs_per_record * 128;
8273 if (hw_coal->budget)
8274 max = hw_coal->bufs_per_record * hw_coal->budget;
8275 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8276
8277 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8278 req->num_cmpl_aggr_int = cpu_to_le16(val);
8279
8280 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8281 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8282
8283 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8284 coal_cap->num_cmpl_dma_aggr_during_int_max);
8285 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8286
8287 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8288 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8289 req->int_lat_tmr_max = cpu_to_le16(tmr);
8290
8291 /* min timer set to 1/2 of interrupt timer */
8292 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8293 val = tmr / 2;
8294 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8295 req->int_lat_tmr_min = cpu_to_le16(val);
8296 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8297 }
8298
8299 /* buf timer set to 1/4 of interrupt timer */
8300 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8301 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8302
8303 if (cmpl_params &
8304 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8305 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8306 val = clamp_t(u16, tmr, 1,
8307 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8308 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8309 req->enables |=
8310 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8311 }
8312
8313 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8314 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8315 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8316 req->flags = cpu_to_le16(flags);
8317 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8318 }
8319
8320 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8321 struct bnxt_coal *hw_coal)
8322 {
8323 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8324 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8325 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8326 u32 nq_params = coal_cap->nq_params;
8327 u16 tmr;
8328 int rc;
8329
8330 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8331 return 0;
8332
8333 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8334 if (rc)
8335 return rc;
8336
8337 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8338 req->flags =
8339 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8340
8341 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8342 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8343 req->int_lat_tmr_min = cpu_to_le16(tmr);
8344 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8345 return hwrm_req_send(bp, req);
8346 }
8347
8348 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8349 {
8350 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8352 struct bnxt_coal coal;
8353 int rc;
8354
8355 /* Tick values in microseconds.
8356 * 1 coal_buf x bufs_per_record = 1 completion record.
8357 */
8358 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8359
8360 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8361 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8362
8363 if (!bnapi->rx_ring)
8364 return -ENODEV;
8365
8366 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8367 if (rc)
8368 return rc;
8369
8370 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8371
8372 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8373
8374 return hwrm_req_send(bp, req_rx);
8375 }
8376
8377 static int
8378 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8379 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8380 {
8381 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8382
8383 req->ring_id = cpu_to_le16(ring_id);
8384 return hwrm_req_send(bp, req);
8385 }
8386
8387 static int
8388 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8389 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8390 {
8391 struct bnxt_tx_ring_info *txr;
8392 int i, rc;
8393
8394 bnxt_for_each_napi_tx(i, bnapi, txr) {
8395 u16 ring_id;
8396
8397 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8398 req->ring_id = cpu_to_le16(ring_id);
8399 rc = hwrm_req_send(bp, req);
8400 if (rc)
8401 return rc;
8402 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8403 return 0;
8404 }
8405 return 0;
8406 }
8407
8408 int bnxt_hwrm_set_coal(struct bnxt *bp)
8409 {
8410 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8411 int i, rc;
8412
8413 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8414 if (rc)
8415 return rc;
8416
8417 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8418 if (rc) {
8419 hwrm_req_drop(bp, req_rx);
8420 return rc;
8421 }
8422
8423 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8424 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8425
8426 hwrm_req_hold(bp, req_rx);
8427 hwrm_req_hold(bp, req_tx);
8428 for (i = 0; i < bp->cp_nr_rings; i++) {
8429 struct bnxt_napi *bnapi = bp->bnapi[i];
8430 struct bnxt_coal *hw_coal;
8431
8432 if (!bnapi->rx_ring)
8433 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8434 else
8435 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8436 if (rc)
8437 break;
8438
8439 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8440 continue;
8441
8442 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8443 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8444 if (rc)
8445 break;
8446 }
8447 if (bnapi->rx_ring)
8448 hw_coal = &bp->rx_coal;
8449 else
8450 hw_coal = &bp->tx_coal;
8451 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8452 }
8453 hwrm_req_drop(bp, req_rx);
8454 hwrm_req_drop(bp, req_tx);
8455 return rc;
8456 }
8457
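/* Free all per-ring statistics contexts.  On older firmware (major
 * version <= 20) each context's counters are cleared with
 * HWRM_STAT_CTX_CLR_STATS before it is freed.
 */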
8458 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8459 {
8460 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8461 struct hwrm_stat_ctx_free_input *req;
8462 int i;
8463
8464 if (!bp->bnapi)
8465 return;
8466
8467 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8468 return;
8469
8470 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8471 return;
8472 if (BNXT_FW_MAJ(bp) <= 20) {
8473 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8474 hwrm_req_drop(bp, req);
8475 return;
8476 }
8477 hwrm_req_hold(bp, req0);
8478 }
8479 hwrm_req_hold(bp, req);
8480 for (i = 0; i < bp->cp_nr_rings; i++) {
8481 struct bnxt_napi *bnapi = bp->bnapi[i];
8482 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8483
8484 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8485 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8486 if (req0) {
8487 req0->stat_ctx_id = req->stat_ctx_id;
8488 hwrm_req_send(bp, req0);
8489 }
8490 hwrm_req_send(bp, req);
8491
8492 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8493 }
8494 }
8495 hwrm_req_drop(bp, req);
8496 if (req0)
8497 hwrm_req_drop(bp, req0);
8498 }
8499
8500 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8501 {
8502 struct hwrm_stat_ctx_alloc_output *resp;
8503 struct hwrm_stat_ctx_alloc_input *req;
8504 int rc, i;
8505
8506 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8507 return 0;
8508
8509 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8510 if (rc)
8511 return rc;
8512
8513 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8514 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8515
8516 resp = hwrm_req_hold(bp, req);
8517 for (i = 0; i < bp->cp_nr_rings; i++) {
8518 struct bnxt_napi *bnapi = bp->bnapi[i];
8519 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8520
8521 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8522
8523 rc = hwrm_req_send(bp, req);
8524 if (rc)
8525 break;
8526
8527 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8528
8529 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8530 }
8531 hwrm_req_drop(bp, req);
8532 return rc;
8533 }
8534
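/* Query the function configuration (HWRM_FUNC_QCFG) and cache the VF VLAN
 * and trust state, LLDP/DCBX agent capabilities, NPAR partition type,
 * bridge mode, maximum configured MTU and the L2 doorbell BAR offset and
 * size.
 */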
8535 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8536 {
8537 struct hwrm_func_qcfg_output *resp;
8538 struct hwrm_func_qcfg_input *req;
8539 u16 flags;
8540 int rc;
8541
8542 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8543 if (rc)
8544 return rc;
8545
8546 req->fid = cpu_to_le16(0xffff);
8547 resp = hwrm_req_hold(bp, req);
8548 rc = hwrm_req_send(bp, req);
8549 if (rc)
8550 goto func_qcfg_exit;
8551
8552 flags = le16_to_cpu(resp->flags);
8553 #ifdef CONFIG_BNXT_SRIOV
8554 if (BNXT_VF(bp)) {
8555 struct bnxt_vf_info *vf = &bp->vf;
8556
8557 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8558 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8559 vf->flags |= BNXT_VF_TRUST;
8560 else
8561 vf->flags &= ~BNXT_VF_TRUST;
8562 } else {
8563 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8564 }
8565 #endif
8566 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8567 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8568 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8569 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8570 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8571 }
8572 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8573 bp->flags |= BNXT_FLAG_MULTI_HOST;
8574
8575 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8576 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8577
8578 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8579 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8580 if (resp->roce_bidi_opt_mode &
8581 FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8582 bp->cos0_cos1_shared = 1;
8583 else
8584 bp->cos0_cos1_shared = 0;
8585
8586 switch (resp->port_partition_type) {
8587 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8588 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8589 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8590 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8591 bp->port_partition_type = resp->port_partition_type;
8592 break;
8593 }
8594 if (bp->hwrm_spec_code < 0x10707 ||
8595 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8596 bp->br_mode = BRIDGE_MODE_VEB;
8597 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8598 bp->br_mode = BRIDGE_MODE_VEPA;
8599 else
8600 bp->br_mode = BRIDGE_MODE_UNDEF;
8601
8602 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8603 if (!bp->max_mtu)
8604 bp->max_mtu = BNXT_MAX_MTU;
8605
8606 if (bp->db_size)
8607 goto func_qcfg_exit;
8608
8609 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8610 if (BNXT_CHIP_P5(bp)) {
8611 if (BNXT_PF(bp))
8612 bp->db_offset = DB_PF_OFFSET_P5;
8613 else
8614 bp->db_offset = DB_VF_OFFSET_P5;
8615 }
8616 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8617 1024);
8618 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8619 bp->db_size <= bp->db_offset)
8620 bp->db_size = pci_resource_len(bp->pdev, 2);
8621
8622 func_qcfg_exit:
8623 hwrm_req_drop(bp, req);
8624 return rc;
8625 }
8626
8627 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8628 u8 init_val, u8 init_offset,
8629 bool init_mask_set)
8630 {
8631 ctxm->init_value = init_val;
8632 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8633 if (init_mask_set)
8634 ctxm->init_offset = init_offset * 4;
8635 else
8636 ctxm->init_value = 0;
8637 }
8638
8639 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8640 {
8641 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8642 u16 type;
8643
8644 for (type = 0; type < ctx_max; type++) {
8645 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8646 int n = 1;
8647
8648 if (!ctxm->max_entries || ctxm->pg_info)
8649 continue;
8650
8651 if (ctxm->instance_bmap)
8652 n = hweight32(ctxm->instance_bmap);
8653 ctxm->pg_info = kzalloc_objs(*ctxm->pg_info, n);
8654 if (!ctxm->pg_info)
8655 return -ENOMEM;
8656 }
8657 return 0;
8658 }
8659
8660 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8661 struct bnxt_ctx_mem_type *ctxm, bool force);
8662
8663 #define BNXT_CTX_INIT_VALID(flags) \
8664 (!!((flags) & \
8665 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8666
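/* Walk the V2 backing store context types reported by firmware, following
 * resp->next_valid_type, and cache each type's entry size, entry counts,
 * split entries and initializer in bp->ctx.  Types that are no longer
 * valid, or whose geometry changed, are freed first.
 */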
8667 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8668 {
8669 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8670 struct hwrm_func_backing_store_qcaps_v2_input *req;
8671 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8672 u16 type;
8673 int rc;
8674
8675 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8676 if (rc)
8677 return rc;
8678
8679 if (!ctx) {
8680 ctx = kzalloc_obj(*ctx);
8681 if (!ctx)
8682 return -ENOMEM;
8683 bp->ctx = ctx;
8684 }
8685
8686 resp = hwrm_req_hold(bp, req);
8687
8688 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8689 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8690 u8 init_val, init_off, i;
8691 u32 max_entries;
8692 u16 entry_size;
8693 __le32 *p;
8694 u32 flags;
8695
8696 req->type = cpu_to_le16(type);
8697 rc = hwrm_req_send(bp, req);
8698 if (rc)
8699 goto ctx_done;
8700 flags = le32_to_cpu(resp->flags);
8701 type = le16_to_cpu(resp->next_valid_type);
8702 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8703 bnxt_free_one_ctx_mem(bp, ctxm, true);
8704 continue;
8705 }
8706 entry_size = le16_to_cpu(resp->entry_size);
8707 max_entries = le32_to_cpu(resp->max_num_entries);
8708 if (ctxm->mem_valid) {
8709 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8710 ctxm->entry_size != entry_size ||
8711 ctxm->max_entries != max_entries)
8712 bnxt_free_one_ctx_mem(bp, ctxm, true);
8713 else
8714 continue;
8715 }
8716 ctxm->type = le16_to_cpu(resp->type);
8717 ctxm->entry_size = entry_size;
8718 ctxm->flags = flags;
8719 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8720 ctxm->entry_multiple = resp->entry_multiple;
8721 ctxm->max_entries = max_entries;
8722 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8723 init_val = resp->ctx_init_value;
8724 init_off = resp->ctx_init_offset;
8725 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8726 BNXT_CTX_INIT_VALID(flags));
8727 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8728 BNXT_MAX_SPLIT_ENTRY);
8729 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8730 i++, p++)
8731 ctxm->split[i] = le32_to_cpu(*p);
8732 }
8733 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8734
8735 ctx_done:
8736 hwrm_req_drop(bp, req);
8737 return rc;
8738 }
8739
8740 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8741 {
8742 struct hwrm_func_backing_store_qcaps_output *resp;
8743 struct hwrm_func_backing_store_qcaps_input *req;
8744 int rc;
8745
8746 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8747 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8748 return 0;
8749
8750 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8751 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8752
8753 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8754 if (rc)
8755 return rc;
8756
8757 resp = hwrm_req_hold(bp, req);
8758 rc = hwrm_req_send_silent(bp, req);
8759 if (!rc) {
8760 struct bnxt_ctx_mem_type *ctxm;
8761 struct bnxt_ctx_mem_info *ctx;
8762 u8 init_val, init_idx = 0;
8763 u16 init_mask;
8764
8765 ctx = bp->ctx;
8766 if (!ctx) {
8767 ctx = kzalloc_obj(*ctx);
8768 if (!ctx) {
8769 rc = -ENOMEM;
8770 goto ctx_err;
8771 }
8772 bp->ctx = ctx;
8773 }
8774 init_val = resp->ctx_kind_initializer;
8775 init_mask = le16_to_cpu(resp->ctx_init_mask);
8776
8777 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8778 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8779 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8780 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8781 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8782 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8783 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8784 (init_mask & (1 << init_idx++)) != 0);
8785
8786 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8787 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8788 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8789 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8790 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8791 (init_mask & (1 << init_idx++)) != 0);
8792
8793 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8794 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8795 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8796 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8797 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8798 (init_mask & (1 << init_idx++)) != 0);
8799
8800 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8801 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8802 ctxm->max_entries = ctxm->vnic_entries +
8803 le16_to_cpu(resp->vnic_max_ring_table_entries);
8804 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8805 bnxt_init_ctx_initializer(ctxm, init_val,
8806 resp->vnic_init_offset,
8807 (init_mask & (1 << init_idx++)) != 0);
8808
8809 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8810 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8811 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8812 bnxt_init_ctx_initializer(ctxm, init_val,
8813 resp->stat_init_offset,
8814 (init_mask & (1 << init_idx++)) != 0);
8815
8816 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8817 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8818 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8819 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8820 ctxm->entry_multiple = resp->tqm_entries_multiple;
8821 if (!ctxm->entry_multiple)
8822 ctxm->entry_multiple = 1;
8823
8824 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8825
8826 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8827 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8828 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8829 ctxm->mrav_num_entries_units =
8830 le16_to_cpu(resp->mrav_num_entries_units);
8831 bnxt_init_ctx_initializer(ctxm, init_val,
8832 resp->mrav_init_offset,
8833 (init_mask & (1 << init_idx++)) != 0);
8834
8835 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8836 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8837 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8838
8839 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8840 if (!ctx->tqm_fp_rings_count)
8841 ctx->tqm_fp_rings_count = bp->max_q;
8842 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8843 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8844
8845 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8846 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8847 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8848
8849 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8850 } else {
8851 rc = 0;
8852 }
8853 ctx_err:
8854 hwrm_req_drop(bp, req);
8855 return rc;
8856 }
8857
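/* Encode the page size, indirection level and page table (or page)
 * address of a ring memory block into the attribute and directory
 * fields of a backing store configuration request.
 */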
8858 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8859 __le64 *pg_dir)
8860 {
8861 if (!rmem->nr_pages)
8862 return;
8863
8864 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8865 if (rmem->depth >= 1) {
8866 if (rmem->depth == 2)
8867 *pg_attr |= 2;
8868 else
8869 *pg_attr |= 1;
8870 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8871 } else {
8872 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8873 }
8874 }
8875
8876 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8877 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8878 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8879 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8880 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8881 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8882
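/* Program the firmware with the backing store memory allocated for each
 * context type selected in 'enables' using the legacy, single-shot
 * FUNC_BACKING_STORE_CFG command.
 */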
8883 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8884 {
8885 struct hwrm_func_backing_store_cfg_input *req;
8886 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8887 struct bnxt_ctx_pg_info *ctx_pg;
8888 struct bnxt_ctx_mem_type *ctxm;
8889 void **__req = (void **)&req;
8890 u32 req_len = sizeof(*req);
8891 __le32 *num_entries;
8892 __le64 *pg_dir;
8893 u32 flags = 0;
8894 u8 *pg_attr;
8895 u32 ena;
8896 int rc;
8897 int i;
8898
8899 if (!ctx)
8900 return 0;
8901
8902 if (req_len > bp->hwrm_max_ext_req_len)
8903 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8904 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8905 if (rc)
8906 return rc;
8907
8908 req->enables = cpu_to_le32(enables);
8909 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8910 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8911 ctx_pg = ctxm->pg_info;
8912 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8913 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8914 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8915 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8916 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8917 &req->qpc_pg_size_qpc_lvl,
8918 &req->qpc_page_dir);
8919
8920 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8921 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8922 }
8923 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8924 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8925 ctx_pg = ctxm->pg_info;
8926 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8927 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8928 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8929 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8930 &req->srq_pg_size_srq_lvl,
8931 &req->srq_page_dir);
8932 }
8933 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8934 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8935 ctx_pg = ctxm->pg_info;
8936 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8937 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8938 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8939 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8940 &req->cq_pg_size_cq_lvl,
8941 &req->cq_page_dir);
8942 }
8943 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8944 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8945 ctx_pg = ctxm->pg_info;
8946 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8947 req->vnic_num_ring_table_entries =
8948 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8949 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8950 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8951 &req->vnic_pg_size_vnic_lvl,
8952 &req->vnic_page_dir);
8953 }
8954 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8955 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8956 ctx_pg = ctxm->pg_info;
8957 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8958 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8959 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8960 &req->stat_pg_size_stat_lvl,
8961 &req->stat_page_dir);
8962 }
8963 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8964 u32 units;
8965
8966 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8967 ctx_pg = ctxm->pg_info;
8968 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8969 units = ctxm->mrav_num_entries_units;
8970 if (units) {
8971 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8972 u32 entries;
8973
8974 num_mr = ctx_pg->entries - num_ah;
8975 entries = ((num_mr / units) << 16) | (num_ah / units);
8976 req->mrav_num_entries = cpu_to_le32(entries);
8977 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8978 }
8979 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8980 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8981 &req->mrav_pg_size_mrav_lvl,
8982 &req->mrav_page_dir);
8983 }
8984 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8985 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8986 ctx_pg = ctxm->pg_info;
8987 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8988 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8989 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8990 &req->tim_pg_size_tim_lvl,
8991 &req->tim_page_dir);
8992 }
8993 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8994 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8995 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8996 pg_dir = &req->tqm_sp_page_dir,
8997 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8998 ctx_pg = ctxm->pg_info;
8999 i < BNXT_MAX_TQM_RINGS;
9000 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
9001 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
9002 if (!(enables & ena))
9003 continue;
9004
9005 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
9006 *num_entries = cpu_to_le32(ctx_pg->entries);
9007 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9008 }
9009 req->flags = cpu_to_le32(flags);
9010 return hwrm_req_send(bp, req);
9011 }
9012
9013 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9014 struct bnxt_ctx_pg_info *ctx_pg)
9015 {
9016 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9017
9018 rmem->page_size = BNXT_PAGE_SIZE;
9019 rmem->pg_arr = ctx_pg->ctx_pg_arr;
9020 rmem->dma_arr = ctx_pg->ctx_dma_arr;
9021 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9022 if (rmem->depth >= 1)
9023 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9024 return bnxt_alloc_ring(bp, rmem);
9025 }
9026
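/* Allocate the host pages (and page tables) backing one context memory
 * block of 'mem_size' bytes.  A second level of indirection is used when
 * the block requires more than MAX_CTX_PAGES pages or when the caller
 * asks for a deeper page table.
 */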
9027 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9028 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9029 u8 depth, struct bnxt_ctx_mem_type *ctxm)
9030 {
9031 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9032 int rc;
9033
9034 if (!mem_size)
9035 return -EINVAL;
9036
9037 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9038 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9039 ctx_pg->nr_pages = 0;
9040 return -EINVAL;
9041 }
9042 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9043 int nr_tbls, i;
9044
9045 rmem->depth = 2;
9046 ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9047 if (!ctx_pg->ctx_pg_tbl)
9048 return -ENOMEM;
9049 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9050 rmem->nr_pages = nr_tbls;
9051 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9052 if (rc)
9053 return rc;
9054 for (i = 0; i < nr_tbls; i++) {
9055 struct bnxt_ctx_pg_info *pg_tbl;
9056
9057 pg_tbl = kzalloc_obj(*pg_tbl);
9058 if (!pg_tbl)
9059 return -ENOMEM;
9060 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9061 rmem = &pg_tbl->ring_mem;
9062 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9063 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9064 rmem->depth = 1;
9065 rmem->nr_pages = MAX_CTX_PAGES;
9066 rmem->ctx_mem = ctxm;
9067 if (i == (nr_tbls - 1)) {
9068 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9069
9070 if (rem)
9071 rmem->nr_pages = rem;
9072 }
9073 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9074 if (rc)
9075 break;
9076 }
9077 } else {
9078 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9079 if (rmem->nr_pages > 1 || depth)
9080 rmem->depth = 1;
9081 rmem->ctx_mem = ctxm;
9082 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9083 }
9084 return rc;
9085 }
9086
9087 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9088 struct bnxt_ctx_pg_info *ctx_pg,
9089 void *buf, size_t offset, size_t head,
9090 size_t tail)
9091 {
9092 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9093 size_t nr_pages = ctx_pg->nr_pages;
9094 int page_size = rmem->page_size;
9095 size_t len = 0, total_len = 0;
9096 u16 depth = rmem->depth;
9097
9098 tail %= nr_pages * page_size;
9099 do {
9100 if (depth > 1) {
9101 int i = head / (page_size * MAX_CTX_PAGES);
9102 struct bnxt_ctx_pg_info *pg_tbl;
9103
9104 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9105 rmem = &pg_tbl->ring_mem;
9106 }
9107 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9108 head += len;
9109 offset += len;
9110 total_len += len;
9111 if (head >= nr_pages * page_size)
9112 head = 0;
9113 } while (head != tail);
9114 return total_len;
9115 }
9116
9117 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9118 struct bnxt_ctx_pg_info *ctx_pg)
9119 {
9120 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9121
9122 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9123 ctx_pg->ctx_pg_tbl) {
9124 int i, nr_tbls = rmem->nr_pages;
9125
9126 for (i = 0; i < nr_tbls; i++) {
9127 struct bnxt_ctx_pg_info *pg_tbl;
9128 struct bnxt_ring_mem_info *rmem2;
9129
9130 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9131 if (!pg_tbl)
9132 continue;
9133 rmem2 = &pg_tbl->ring_mem;
9134 bnxt_free_ring(bp, rmem2);
9135 ctx_pg->ctx_pg_arr[i] = NULL;
9136 kfree(pg_tbl);
9137 ctx_pg->ctx_pg_tbl[i] = NULL;
9138 }
9139 kfree(ctx_pg->ctx_pg_tbl);
9140 ctx_pg->ctx_pg_tbl = NULL;
9141 }
9142 bnxt_free_ring(bp, rmem);
9143 ctx_pg->nr_pages = 0;
9144 }
9145
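/* Size and allocate the backing store pages for one context memory type.
 * The requested entry count is rounded up to the required multiple and
 * clamped to the min/max range before one block is allocated for each
 * instance of the type.
 */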
9146 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9147 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9148 u8 pg_lvl)
9149 {
9150 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9151 int i, rc = 0, n = 1;
9152 u32 mem_size;
9153
9154 if (!ctxm->entry_size || !ctx_pg)
9155 return -EINVAL;
9156 if (ctxm->instance_bmap)
9157 n = hweight32(ctxm->instance_bmap);
9158 if (ctxm->entry_multiple)
9159 entries = roundup(entries, ctxm->entry_multiple);
9160 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9161 mem_size = entries * ctxm->entry_size;
9162 for (i = 0; i < n && !rc; i++) {
9163 ctx_pg[i].entries = entries;
9164 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9165 ctxm->init_value ? ctxm : NULL);
9166 }
9167 if (!rc)
9168 ctxm->mem_valid = 1;
9169 return rc;
9170 }
9171
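/* Program one context memory type using the V2 configuration command.
 * A separate request is sent for every instance in the instance bitmap;
 * the final request of the last type carries the BS_CFG_ALL_DONE flag.
 */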
9172 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9173 struct bnxt_ctx_mem_type *ctxm,
9174 bool last)
9175 {
9176 struct hwrm_func_backing_store_cfg_v2_input *req;
9177 u32 instance_bmap = ctxm->instance_bmap;
9178 int i, j, rc = 0, n = 1;
9179 __le32 *p;
9180
9181 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9182 return 0;
9183
9184 if (instance_bmap)
9185 n = hweight32(ctxm->instance_bmap);
9186 else
9187 instance_bmap = 1;
9188
9189 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9190 if (rc)
9191 return rc;
9192 hwrm_req_hold(bp, req);
9193 req->type = cpu_to_le16(ctxm->type);
9194 req->entry_size = cpu_to_le16(ctxm->entry_size);
9195 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9196 bnxt_bs_trace_avail(bp, ctxm->type)) {
9197 struct bnxt_bs_trace_info *bs_trace;
9198 u32 enables;
9199
9200 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9201 req->enables = cpu_to_le32(enables);
9202 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9203 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9204 }
9205 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9206 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9207 p[i] = cpu_to_le32(ctxm->split[i]);
9208 for (i = 0, j = 0; j < n && !rc; i++) {
9209 struct bnxt_ctx_pg_info *ctx_pg;
9210
9211 if (!(instance_bmap & (1 << i)))
9212 continue;
9213 req->instance = cpu_to_le16(i);
9214 ctx_pg = &ctxm->pg_info[j++];
9215 if (!ctx_pg->entries)
9216 continue;
9217 req->num_entries = cpu_to_le32(ctx_pg->entries);
9218 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9219 &req->page_size_pbl_level,
9220 &req->page_dir);
9221 if (last && j == n)
9222 req->flags =
9223 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9224 rc = hwrm_req_send(bp, req);
9225 }
9226 hwrm_req_drop(bp, req);
9227 return rc;
9228 }
9229
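/* Configure all valid context memory types through the V2 interface.
 * Backing store based firmware trace types are set up first, and the
 * last valid type is marked so the final request signals completion.
 */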
9230 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9231 {
9232 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9233 struct bnxt_ctx_mem_type *ctxm;
9234 u16 last_type = BNXT_CTX_INV;
9235 int rc = 0;
9236 u16 type;
9237
9238 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9239 ctxm = &ctx->ctx_arr[type];
9240 if (!bnxt_bs_trace_avail(bp, type))
9241 continue;
9242 if (!ctxm->mem_valid) {
9243 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9244 ctxm->max_entries, 1);
9245 if (rc) {
9246 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9247 type);
9248 continue;
9249 }
9250 bnxt_bs_trace_init(bp, ctxm);
9251 }
9252 last_type = type;
9253 }
9254
9255 if (last_type == BNXT_CTX_INV) {
9256 for (type = 0; type < BNXT_CTX_MAX; type++) {
9257 ctxm = &ctx->ctx_arr[type];
9258 if (ctxm->mem_valid)
9259 last_type = type;
9260 }
9261 if (last_type == BNXT_CTX_INV)
9262 return 0;
9263 }
9264 ctx->ctx_arr[last_type].last = 1;
9265
9266 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9267 ctxm = &ctx->ctx_arr[type];
9268
9269 if (!ctxm->mem_valid)
9270 continue;
9271 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9272 if (rc)
9273 return rc;
9274 }
9275 return 0;
9276 }
9277
9278 /**
9279 * __bnxt_copy_ctx_mem - copy host context memory
9280 * @bp: The driver context
9281 * @ctxm: The pointer to the context memory type
9282 * @buf: The destination buffer or NULL to just obtain the length
9283 * @offset: The buffer offset to copy the data to
9284 * @head: The head offset of context memory to copy from
9285 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9286 *
9287 * This function is called for debugging purposes to dump the host context
9288 * used by the chip.
9289 *
9290 * Return: Length of memory copied
9291 */
9292 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9293 struct bnxt_ctx_mem_type *ctxm, void *buf,
9294 size_t offset, size_t head, size_t tail)
9295 {
9296 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9297 size_t len = 0, total_len = 0;
9298 int i, n = 1;
9299
9300 if (!ctx_pg)
9301 return 0;
9302
9303 if (ctxm->instance_bmap)
9304 n = hweight32(ctxm->instance_bmap);
9305 for (i = 0; i < n; i++) {
9306 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9307 tail);
9308 offset += len;
9309 total_len += len;
9310 }
9311 return total_len;
9312 }
9313
9314 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9315 void *buf, size_t offset)
9316 {
9317 size_t tail = ctxm->max_entries * ctxm->entry_size;
9318
9319 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9320 }
9321
9322 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9323 struct bnxt_ctx_mem_type *ctxm, bool force)
9324 {
9325 struct bnxt_ctx_pg_info *ctx_pg;
9326 int i, n = 1;
9327
9328 ctxm->last = 0;
9329
9330 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9331 return;
9332
9333 ctx_pg = ctxm->pg_info;
9334 if (ctx_pg) {
9335 if (ctxm->instance_bmap)
9336 n = hweight32(ctxm->instance_bmap);
9337 for (i = 0; i < n; i++)
9338 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9339
9340 kfree(ctx_pg);
9341 ctxm->pg_info = NULL;
9342 ctxm->mem_valid = 0;
9343 }
9344 memset(ctxm, 0, sizeof(*ctxm));
9345 }
9346
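/* Free all host context memory.  Types marked persistent are preserved
 * unless 'force' is set, in which case everything, including the top
 * level context structure, is released.
 */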
9347 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9348 {
9349 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9350 u16 type;
9351
9352 if (!ctx)
9353 return;
9354
9355 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9356 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9357
9358 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9359 if (force) {
9360 kfree(ctx);
9361 bp->ctx = NULL;
9362 }
9363 }
9364
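/* Query the context memory capabilities, size the QP, SRQ, CQ, VNIC,
 * STAT and TQM backing store types (plus MRAV and TIM when RoCE is
 * supported), allocate the host pages and program the firmware with the
 * result.
 */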
9365 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9366 {
9367 struct bnxt_ctx_mem_type *ctxm;
9368 struct bnxt_ctx_mem_info *ctx;
9369 u32 l2_qps, qp1_qps, max_qps;
9370 u32 ena, entries_sp, entries;
9371 u32 srqs, max_srqs, min;
9372 u32 num_mr, num_ah;
9373 u32 extra_srqs = 0;
9374 u32 extra_qps = 0;
9375 u32 fast_qpmd_qps;
9376 u8 pg_lvl = 1;
9377 int i, rc;
9378
9379 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9380 if (rc) {
9381 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9382 rc);
9383 return rc;
9384 }
9385 ctx = bp->ctx;
9386 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9387 return 0;
9388
9389 ena = 0;
9390 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9391 goto skip_legacy;
9392
9393 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9394 l2_qps = ctxm->qp_l2_entries;
9395 qp1_qps = ctxm->qp_qp1_entries;
9396 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9397 max_qps = ctxm->max_entries;
9398 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9399 srqs = ctxm->srq_l2_entries;
9400 max_srqs = ctxm->max_entries;
9401 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9402 pg_lvl = 2;
9403 if (BNXT_SW_RES_LMT(bp)) {
9404 extra_qps = max_qps - l2_qps - qp1_qps;
9405 extra_srqs = max_srqs - srqs;
9406 } else {
9407 extra_qps = min_t(u32, 65536,
9408 max_qps - l2_qps - qp1_qps);
9409 /* allocate extra qps if fw supports RoCE fast qp
9410 * destroy feature
9411 */
9412 extra_qps += fast_qpmd_qps;
9413 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9414 }
9415 if (fast_qpmd_qps)
9416 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9417 }
9418
9419 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9420 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9421 pg_lvl);
9422 if (rc)
9423 return rc;
9424
9425 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9426 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9427 if (rc)
9428 return rc;
9429
9430 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9431 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9432 extra_qps * 2, pg_lvl);
9433 if (rc)
9434 return rc;
9435
9436 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9437 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9438 if (rc)
9439 return rc;
9440
9441 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9442 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9443 if (rc)
9444 return rc;
9445
9446 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9447 goto skip_rdma;
9448
9449 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9450 if (BNXT_SW_RES_LMT(bp) &&
9451 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9452 num_ah = ctxm->mrav_av_entries;
9453 num_mr = ctxm->max_entries - num_ah;
9454 } else {
9455 /* 128K extra is needed to accommodate static AH context
9456 * allocation by f/w.
9457 */
9458 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9459 num_ah = min_t(u32, num_mr, 1024 * 128);
9460 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9461 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9462 ctxm->mrav_av_entries = num_ah;
9463 }
9464
9465 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9466 if (rc)
9467 return rc;
9468 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9469
9470 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9471 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9472 if (rc)
9473 return rc;
9474 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9475
9476 skip_rdma:
9477 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9478 min = ctxm->min_entries;
9479 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9480 2 * (extra_qps + qp1_qps) + min;
9481 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9482 if (rc)
9483 return rc;
9484
9485 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9486 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9487 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9488 if (rc)
9489 return rc;
9490 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9491 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9492 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9493
9494 skip_legacy:
9495 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9496 rc = bnxt_backing_store_cfg_v2(bp);
9497 else
9498 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9499 if (rc) {
9500 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9501 rc);
9502 return rc;
9503 }
9504 ctx->flags |= BNXT_CTX_FLAG_INITED;
9505 return 0;
9506 }
9507
9508 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9509 {
9510 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9511 u16 page_attr;
9512 int rc;
9513
9514 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9515 return 0;
9516
9517 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9518 if (rc)
9519 return rc;
9520
9521 if (BNXT_PAGE_SIZE == 0x2000)
9522 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9523 else if (BNXT_PAGE_SIZE == 0x10000)
9524 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9525 else
9526 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9527 req->pg_size_lvl = cpu_to_le16(page_attr |
9528 bp->fw_crash_mem->ring_mem.depth);
9529 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9530 req->size = cpu_to_le32(bp->fw_crash_len);
9531 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9532 return hwrm_req_send(bp, req);
9533 }
9534
9535 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9536 {
9537 if (bp->fw_crash_mem) {
9538 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9539 kfree(bp->fw_crash_mem);
9540 bp->fw_crash_mem = NULL;
9541 }
9542 }
9543
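/* Allocate (or reuse, when the existing allocation is large enough) the
 * host memory that the firmware uses for crash dump collection when
 * host-DDR crash dumps are supported.
 */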
9544 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9545 {
9546 u32 mem_size = 0;
9547 int rc;
9548
9549 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9550 return 0;
9551
9552 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9553 if (rc)
9554 return rc;
9555
9556 mem_size = round_up(mem_size, 4);
9557
9558 /* keep and use the existing pages */
9559 if (bp->fw_crash_mem &&
9560 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9561 goto alloc_done;
9562
9563 if (bp->fw_crash_mem)
9564 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9565 else
9566 bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9567 if (!bp->fw_crash_mem)
9568 return -ENOMEM;
9569
9570 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9571 if (rc) {
9572 bnxt_free_crash_dump_mem(bp);
9573 return rc;
9574 }
9575
9576 alloc_done:
9577 bp->fw_crash_len = mem_size;
9578 return 0;
9579 }
9580
9581 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9582 {
9583 struct hwrm_func_resource_qcaps_output *resp;
9584 struct hwrm_func_resource_qcaps_input *req;
9585 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9586 int rc;
9587
9588 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9589 if (rc)
9590 return rc;
9591
9592 req->fid = cpu_to_le16(0xffff);
9593 resp = hwrm_req_hold(bp, req);
9594 rc = hwrm_req_send_silent(bp, req);
9595 if (rc)
9596 goto hwrm_func_resc_qcaps_exit;
9597
9598 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9599 if (!all)
9600 goto hwrm_func_resc_qcaps_exit;
9601
9602 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9603 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9604 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9605 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9606 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9607 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9608 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9609 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9610 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9611 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9612 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9613 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9614 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9615 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9616 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9617 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9618
9619 if (hw_resc->max_rsscos_ctxs >=
9620 hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9621 bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9622
9623 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9624 u16 max_msix = le16_to_cpu(resp->max_msix);
9625
9626 hw_resc->max_nqs = max_msix;
9627 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9628 }
9629
9630 if (BNXT_PF(bp)) {
9631 struct bnxt_pf_info *pf = &bp->pf;
9632
9633 pf->vf_resv_strategy =
9634 le16_to_cpu(resp->vf_reservation_strategy);
9635 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9636 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9637 }
9638 hwrm_func_resc_qcaps_exit:
9639 hwrm_req_drop(bp, req);
9640 return rc;
9641 }
9642
9643 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9644 {
9645 struct hwrm_port_mac_ptp_qcfg_output *resp;
9646 struct hwrm_port_mac_ptp_qcfg_input *req;
9647 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9648 u8 flags;
9649 int rc;
9650
9651 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9652 rc = -ENODEV;
9653 goto no_ptp;
9654 }
9655
9656 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9657 if (rc)
9658 goto no_ptp;
9659
9660 req->port_id = cpu_to_le16(bp->pf.port_id);
9661 resp = hwrm_req_hold(bp, req);
9662 rc = hwrm_req_send(bp, req);
9663 if (rc)
9664 goto exit;
9665
9666 flags = resp->flags;
9667 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9668 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9669 rc = -ENODEV;
9670 goto exit;
9671 }
9672 if (!ptp) {
9673 ptp = kzalloc_obj(*ptp);
9674 if (!ptp) {
9675 rc = -ENOMEM;
9676 goto exit;
9677 }
9678 ptp->bp = bp;
9679 bp->ptp_cfg = ptp;
9680 }
9681
9682 if (flags &
9683 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9684 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9685 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9686 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9687 } else if (BNXT_CHIP_P5(bp)) {
9688 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9689 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9690 } else {
9691 rc = -ENODEV;
9692 goto exit;
9693 }
9694 ptp->rtc_configured =
9695 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9696 rc = bnxt_ptp_init(bp);
9697 if (rc)
9698 netdev_warn(bp->dev, "PTP initialization failed.\n");
9699 exit:
9700 hwrm_req_drop(bp, req);
9701 if (!rc)
9702 return 0;
9703
9704 no_ptp:
9705 bnxt_ptp_clear(bp);
9706 kfree(ptp);
9707 bp->ptp_cfg = NULL;
9708 return rc;
9709 }
9710
9711 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9712 {
9713 u32 flags, flags_ext, flags_ext2, flags_ext3;
9714 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9715 struct hwrm_func_qcaps_output *resp;
9716 struct hwrm_func_qcaps_input *req;
9717 int rc;
9718
9719 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9720 if (rc)
9721 return rc;
9722
9723 req->fid = cpu_to_le16(0xffff);
9724 resp = hwrm_req_hold(bp, req);
9725 rc = hwrm_req_send(bp, req);
9726 if (rc)
9727 goto hwrm_func_qcaps_exit;
9728
9729 flags = le32_to_cpu(resp->flags);
9730 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9731 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9732 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9733 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9734 if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9735 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9736 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9737 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9738 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9739 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9740 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9741 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9742 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9743 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9744 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9745 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9746 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9747 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9748 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9749 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9750
9751 flags_ext = le32_to_cpu(resp->flags_ext);
9752 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9753 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9754 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9755 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9756 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9757 bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9758 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9759 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9760 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9761 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9762 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9763 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9764 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9765 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9766 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9767 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9768 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9769 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9770 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9771 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9772
9773 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9774 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9775 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9776 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9777 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9778 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9779 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9780 if (flags_ext2 &
9781 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9782 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9783 if (BNXT_PF(bp) &&
9784 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9785 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9786
9787 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9788 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9789 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9790 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9791 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9792
9793 bp->tx_push_thresh = 0;
9794 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9795 BNXT_FW_MAJ(bp) > 217)
9796 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9797
9798 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9799 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9800 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9801 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9802 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9803 if (!hw_resc->max_hw_ring_grps)
9804 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9805 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9806 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9807 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9808
9809 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9810 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9811 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9812 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9813 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9814 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9815
9816 if (BNXT_PF(bp)) {
9817 struct bnxt_pf_info *pf = &bp->pf;
9818
9819 pf->fw_fid = le16_to_cpu(resp->fid);
9820 pf->port_id = le16_to_cpu(resp->port_id);
9821 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9822 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9823 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9824 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9825 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9826 bp->flags |= BNXT_FLAG_WOL_CAP;
9827 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9828 bp->fw_cap |= BNXT_FW_CAP_PTP;
9829 } else {
9830 bnxt_ptp_clear(bp);
9831 kfree(bp->ptp_cfg);
9832 bp->ptp_cfg = NULL;
9833 }
9834 } else {
9835 #ifdef CONFIG_BNXT_SRIOV
9836 struct bnxt_vf_info *vf = &bp->vf;
9837
9838 vf->fw_fid = le16_to_cpu(resp->fid);
9839 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9840 #endif
9841 }
9842 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9843
9844 hwrm_func_qcaps_exit:
9845 hwrm_req_drop(bp, req);
9846 return rc;
9847 }
9848
9849 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9850 {
9851 struct hwrm_dbg_qcaps_output *resp;
9852 struct hwrm_dbg_qcaps_input *req;
9853 int rc;
9854
9855 bp->fw_dbg_cap = 0;
9856 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9857 return;
9858
9859 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9860 if (rc)
9861 return;
9862
9863 req->fid = cpu_to_le16(0xffff);
9864 resp = hwrm_req_hold(bp, req);
9865 rc = hwrm_req_send(bp, req);
9866 if (rc)
9867 goto hwrm_dbg_qcaps_exit;
9868
9869 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9870
9871 hwrm_dbg_qcaps_exit:
9872 hwrm_req_drop(bp, req);
9873 }
9874
9875 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9876
9877 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9878 {
9879 int rc;
9880
9881 rc = __bnxt_hwrm_func_qcaps(bp);
9882 if (rc)
9883 return rc;
9884
9885 bnxt_hwrm_dbg_qcaps(bp);
9886
9887 rc = bnxt_hwrm_queue_qportcfg(bp);
9888 if (rc) {
9889 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9890 return rc;
9891 }
9892 if (bp->hwrm_spec_code >= 0x10803) {
9893 rc = bnxt_alloc_ctx_mem(bp);
9894 if (rc)
9895 return rc;
9896 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9897 if (!rc)
9898 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9899 }
9900 return 0;
9901 }
9902
9903 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9904 {
9905 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9906 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9907 u32 flags;
9908 int rc;
9909
9910 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9911 return 0;
9912
9913 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9914 if (rc)
9915 return rc;
9916
9917 resp = hwrm_req_hold(bp, req);
9918 rc = hwrm_req_send(bp, req);
9919 if (rc)
9920 goto hwrm_cfa_adv_qcaps_exit;
9921
9922 flags = le32_to_cpu(resp->flags);
9923 if (flags &
9924 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9925 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9926
9927 if (flags &
9928 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9929 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9930
9931 if (flags &
9932 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9933 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9934
9935 hwrm_cfa_adv_qcaps_exit:
9936 hwrm_req_drop(bp, req);
9937 return rc;
9938 }
9939
9940 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9941 {
9942 if (bp->fw_health)
9943 return 0;
9944
9945 bp->fw_health = kzalloc_obj(*bp->fw_health);
9946 if (!bp->fw_health)
9947 return -ENOMEM;
9948
9949 mutex_init(&bp->fw_health->lock);
9950 return 0;
9951 }
9952
9953 static int bnxt_alloc_fw_health(struct bnxt *bp)
9954 {
9955 int rc;
9956
9957 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9958 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9959 return 0;
9960
9961 rc = __bnxt_alloc_fw_health(bp);
9962 if (rc) {
9963 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9964 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9965 return rc;
9966 }
9967
9968 return 0;
9969 }
9970
9971 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9972 {
9973 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9974 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9975 BNXT_FW_HEALTH_WIN_MAP_OFF);
9976 }
9977
9978 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9979 {
9980 struct bnxt_fw_health *fw_health = bp->fw_health;
9981 u32 reg_type;
9982
9983 if (!fw_health)
9984 return;
9985
9986 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9987 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9988 fw_health->status_reliable = false;
9989
9990 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9991 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9992 fw_health->resets_reliable = false;
9993 }
9994
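/* Locate the firmware health status register, either through the hcomm
 * status structure or the chip-specific default location, and map it
 * through a GRC window so the driver can check firmware liveness.
 */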
9995 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9996 {
9997 void __iomem *hs;
9998 u32 status_loc;
9999 u32 reg_type;
10000 u32 sig;
10001
10002 if (bp->fw_health)
10003 bp->fw_health->status_reliable = false;
10004
10005 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
10006 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10007
10008 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10009 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10010 if (!bp->chip_num) {
10011 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10012 bp->chip_num = readl(bp->bar0 +
10013 BNXT_FW_HEALTH_WIN_BASE +
10014 BNXT_GRC_REG_CHIP_NUM);
10015 }
10016 if (!BNXT_CHIP_P5_PLUS(bp))
10017 return;
10018
10019 status_loc = BNXT_GRC_REG_STATUS_P5 |
10020 BNXT_FW_HEALTH_REG_TYPE_BAR0;
10021 } else {
10022 status_loc = readl(hs + offsetof(struct hcomm_status,
10023 fw_status_loc));
10024 }
10025
10026 if (__bnxt_alloc_fw_health(bp)) {
10027 netdev_warn(bp->dev, "no memory for firmware status checks\n");
10028 return;
10029 }
10030
10031 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10032 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10033 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10034 __bnxt_map_fw_health_reg(bp, status_loc);
10035 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10036 BNXT_FW_HEALTH_WIN_OFF(status_loc);
10037 }
10038
10039 bp->fw_health->status_reliable = true;
10040 }
10041
10042 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10043 {
10044 struct bnxt_fw_health *fw_health = bp->fw_health;
10045 u32 reg_base = 0xffffffff;
10046 int i;
10047
10048 bp->fw_health->status_reliable = false;
10049 bp->fw_health->resets_reliable = false;
10050 /* Only pre-map the monitoring GRC registers using window 3 */
10051 for (i = 0; i < 4; i++) {
10052 u32 reg = fw_health->regs[i];
10053
10054 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10055 continue;
10056 if (reg_base == 0xffffffff)
10057 reg_base = reg & BNXT_GRC_BASE_MASK;
10058 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10059 return -ERANGE;
10060 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10061 }
10062 bp->fw_health->status_reliable = true;
10063 bp->fw_health->resets_reliable = true;
10064 if (reg_base == 0xffffffff)
10065 return 0;
10066
10067 __bnxt_map_fw_health_reg(bp, reg_base);
10068 return 0;
10069 }
10070
10071 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10072 {
10073 if (!bp->fw_health)
10074 return;
10075
10076 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10077 bp->fw_health->status_reliable = true;
10078 bp->fw_health->resets_reliable = true;
10079 } else {
10080 bnxt_try_map_fw_health_reg(bp);
10081 }
10082 }
10083
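/* Query the firmware error recovery configuration: polling intervals,
 * wait times, the health/heartbeat/reset-count registers and the reset
 * register sequence, then map the health registers for driver use.
 */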
10084 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10085 {
10086 struct bnxt_fw_health *fw_health = bp->fw_health;
10087 struct hwrm_error_recovery_qcfg_output *resp;
10088 struct hwrm_error_recovery_qcfg_input *req;
10089 int rc, i;
10090
10091 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10092 return 0;
10093
10094 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10095 if (rc)
10096 return rc;
10097
10098 resp = hwrm_req_hold(bp, req);
10099 rc = hwrm_req_send(bp, req);
10100 if (rc)
10101 goto err_recovery_out;
10102 fw_health->flags = le32_to_cpu(resp->flags);
10103 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10104 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10105 rc = -EINVAL;
10106 goto err_recovery_out;
10107 }
10108 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10109 fw_health->master_func_wait_dsecs =
10110 le32_to_cpu(resp->master_func_wait_period);
10111 fw_health->normal_func_wait_dsecs =
10112 le32_to_cpu(resp->normal_func_wait_period);
10113 fw_health->post_reset_wait_dsecs =
10114 le32_to_cpu(resp->master_func_wait_period_after_reset);
10115 fw_health->post_reset_max_wait_dsecs =
10116 le32_to_cpu(resp->max_bailout_time_after_reset);
10117 fw_health->regs[BNXT_FW_HEALTH_REG] =
10118 le32_to_cpu(resp->fw_health_status_reg);
10119 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10120 le32_to_cpu(resp->fw_heartbeat_reg);
10121 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10122 le32_to_cpu(resp->fw_reset_cnt_reg);
10123 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10124 le32_to_cpu(resp->reset_inprogress_reg);
10125 fw_health->fw_reset_inprog_reg_mask =
10126 le32_to_cpu(resp->reset_inprogress_reg_mask);
10127 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10128 if (fw_health->fw_reset_seq_cnt >= 16) {
10129 rc = -EINVAL;
10130 goto err_recovery_out;
10131 }
10132 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10133 fw_health->fw_reset_seq_regs[i] =
10134 le32_to_cpu(resp->reset_reg[i]);
10135 fw_health->fw_reset_seq_vals[i] =
10136 le32_to_cpu(resp->reset_reg_val[i]);
10137 fw_health->fw_reset_seq_delay_msec[i] =
10138 resp->delay_after_reset[i];
10139 }
10140 err_recovery_out:
10141 hwrm_req_drop(bp, req);
10142 if (!rc)
10143 rc = bnxt_map_fw_health_regs(bp);
10144 if (rc)
10145 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10146 return rc;
10147 }
10148
10149 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10150 {
10151 struct hwrm_func_reset_input *req;
10152 int rc;
10153
10154 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10155 if (rc)
10156 return rc;
10157
10158 req->enables = 0;
10159 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10160 return hwrm_req_send(bp, req);
10161 }
10162
10163 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10164 {
10165 struct hwrm_nvm_get_dev_info_output nvm_info;
10166
10167 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10168 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10169 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10170 nvm_info.nvm_cfg_ver_upd);
10171 }
10172
10173 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10174 {
10175 struct hwrm_queue_qportcfg_output *resp;
10176 struct hwrm_queue_qportcfg_input *req;
10177 u8 i, j, *qptr;
10178 bool no_rdma;
10179 int rc = 0;
10180
10181 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10182 if (rc)
10183 return rc;
10184
10185 resp = hwrm_req_hold(bp, req);
10186 rc = hwrm_req_send(bp, req);
10187 if (rc)
10188 goto qportcfg_exit;
10189
10190 if (!resp->max_configurable_queues) {
10191 rc = -EINVAL;
10192 goto qportcfg_exit;
10193 }
10194 bp->max_tc = resp->max_configurable_queues;
10195 bp->max_lltc = resp->max_configurable_lossless_queues;
10196 if (bp->max_tc > BNXT_MAX_QUEUE)
10197 bp->max_tc = BNXT_MAX_QUEUE;
10198
10199 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10200 qptr = &resp->queue_id0;
10201 for (i = 0, j = 0; i < bp->max_tc; i++) {
10202 bp->q_info[j].queue_id = *qptr;
10203 bp->q_ids[i] = *qptr++;
10204 bp->q_info[j].queue_profile = *qptr++;
10205 bp->tc_to_qidx[j] = j;
10206 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10207 (no_rdma && BNXT_PF(bp)))
10208 j++;
10209 }
10210 bp->max_q = bp->max_tc;
10211 bp->max_tc = max_t(u8, j, 1);
10212
10213 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10214 bp->max_tc = 1;
10215
10216 if (bp->max_lltc > bp->max_tc)
10217 bp->max_lltc = bp->max_tc;
10218
10219 qportcfg_exit:
10220 hwrm_req_drop(bp, req);
10221 return rc;
10222 }
10223
10224 static int bnxt_hwrm_poll(struct bnxt *bp)
10225 {
10226 struct hwrm_ver_get_input *req;
10227 int rc;
10228
10229 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10230 if (rc)
10231 return rc;
10232
10233 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10234 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10235 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10236
10237 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10238 rc = hwrm_req_send(bp, req);
10239 return rc;
10240 }
10241
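/* Query the HWRM interface and firmware versions.  Records the supported
 * spec version, firmware version string, command timeouts, chip number
 * and assorted device capability flags.
 */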
10242 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10243 {
10244 struct hwrm_ver_get_output *resp;
10245 struct hwrm_ver_get_input *req;
10246 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10247 u32 dev_caps_cfg, hwrm_ver;
10248 int rc, len, max_tmo_secs;
10249
10250 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10251 if (rc)
10252 return rc;
10253
10254 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10255 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10256 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10257 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10258 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10259
10260 resp = hwrm_req_hold(bp, req);
10261 rc = hwrm_req_send(bp, req);
10262 if (rc)
10263 goto hwrm_ver_get_exit;
10264
10265 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10266
10267 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10268 resp->hwrm_intf_min_8b << 8 |
10269 resp->hwrm_intf_upd_8b;
10270 if (resp->hwrm_intf_maj_8b < 1) {
10271 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10272 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10273 resp->hwrm_intf_upd_8b);
10274 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10275 }
10276
10277 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10278 HWRM_VERSION_UPDATE;
10279
10280 if (bp->hwrm_spec_code > hwrm_ver)
10281 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10282 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10283 HWRM_VERSION_UPDATE);
10284 else
10285 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10286 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10287 resp->hwrm_intf_upd_8b);
10288
10289 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10290 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10291 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10292 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10293 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10294 len = FW_VER_STR_LEN;
10295 } else {
10296 fw_maj = resp->hwrm_fw_maj_8b;
10297 fw_min = resp->hwrm_fw_min_8b;
10298 fw_bld = resp->hwrm_fw_bld_8b;
10299 fw_rsv = resp->hwrm_fw_rsvd_8b;
10300 len = BC_HWRM_STR_LEN;
10301 }
10302 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10303 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10304 fw_rsv);
10305
10306 if (strlen(resp->active_pkg_name)) {
10307 int fw_ver_len = strlen(bp->fw_ver_str);
10308
10309 snprintf(bp->fw_ver_str + fw_ver_len,
10310 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10311 resp->active_pkg_name);
10312 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10313 }
10314
10315 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10316 if (!bp->hwrm_cmd_timeout)
10317 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10318 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10319 if (!bp->hwrm_cmd_max_timeout)
10320 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10321 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10322 #ifdef CONFIG_DETECT_HUNG_TASK
10323 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10324 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10325 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10326 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10327 }
10328 #endif
10329
10330 if (resp->hwrm_intf_maj_8b >= 1) {
10331 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10332 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10333 }
10334 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10335 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10336
10337 bp->chip_num = le16_to_cpu(resp->chip_num);
10338 bp->chip_rev = resp->chip_rev;
10339 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10340 !resp->chip_metal)
10341 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10342
10343 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10344 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10345 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10346 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10347
10348 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10349 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10350
10351 if (dev_caps_cfg &
10352 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10353 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10354
10355 if (dev_caps_cfg &
10356 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10357 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10358
10359 if (dev_caps_cfg &
10360 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10361 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10362
10363 hwrm_ver_get_exit:
10364 hwrm_req_drop(bp, req);
10365 return rc;
10366 }
10367
10368 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10369 {
10370 struct hwrm_fw_set_time_input *req;
10371 struct tm tm;
10372 time64_t now = ktime_get_real_seconds();
10373 int rc;
10374
10375 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10376 bp->hwrm_spec_code < 0x10400)
10377 return -EOPNOTSUPP;
10378
10379 time64_to_tm(now, 0, &tm);
10380 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10381 if (rc)
10382 return rc;
10383
10384 req->year = cpu_to_le16(1900 + tm.tm_year);
10385 req->month = 1 + tm.tm_mon;
10386 req->day = tm.tm_mday;
10387 req->hour = tm.tm_hour;
10388 req->minute = tm.tm_min;
10389 req->second = tm.tm_sec;
10390 return hwrm_req_send(bp, req);
10391 }
10392
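/* Fold a hardware counter that is only 'mask' bits wide into the full
 * 64-bit software counter, accounting for a possible wrap since the last
 * accumulation.
 */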
10393 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10394 {
10395 u64 sw_tmp;
10396
10397 hw &= mask;
10398 sw_tmp = (*sw & ~mask) | hw;
10399 if (hw < (*sw & mask))
10400 sw_tmp += mask + 1;
10401 WRITE_ONCE(*sw, sw_tmp);
10402 }
10403
10404 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10405 int count, bool ignore_zero)
10406 {
10407 int i;
10408
10409 for (i = 0; i < count; i++) {
10410 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10411
10412 if (ignore_zero && !hw)
10413 continue;
10414
10415 if (masks[i] == -1ULL)
10416 sw_stats[i] = hw;
10417 else
10418 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10419 }
10420 }
10421
10422 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10423 {
10424 if (!stats->hw_stats)
10425 return;
10426
10427 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10428 stats->hw_masks, stats->len / 8, false);
10429 }
10430
10431 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10432 {
10433 struct bnxt_stats_mem *ring0_stats;
10434 bool ignore_zero = false;
10435 int i;
10436
10437 /* Chip bug. Counter intermittently becomes 0. */
10438 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10439 ignore_zero = true;
10440
10441 for (i = 0; i < bp->cp_nr_rings; i++) {
10442 struct bnxt_napi *bnapi = bp->bnapi[i];
10443 struct bnxt_cp_ring_info *cpr;
10444 struct bnxt_stats_mem *stats;
10445
10446 cpr = &bnapi->cp_ring;
10447 stats = &cpr->stats;
10448 if (!i)
10449 ring0_stats = stats;
10450 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10451 ring0_stats->hw_masks,
10452 ring0_stats->len / 8, ignore_zero);
10453 }
10454 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10455 struct bnxt_stats_mem *stats = &bp->port_stats;
10456 __le64 *hw_stats = stats->hw_stats;
10457 u64 *sw_stats = stats->sw_stats;
10458 u64 *masks = stats->hw_masks;
10459 int cnt;
10460
10461 cnt = sizeof(struct rx_port_stats) / 8;
10462 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10463
10464 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10465 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10466 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10467 cnt = sizeof(struct tx_port_stats) / 8;
10468 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10469 }
10470 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10471 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10472 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10473 }
10474 }
10475
10476 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10477 {
10478 struct hwrm_port_qstats_input *req;
10479 struct bnxt_pf_info *pf = &bp->pf;
10480 int rc;
10481
10482 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10483 return 0;
10484
10485 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10486 return -EOPNOTSUPP;
10487
10488 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10489 if (rc)
10490 return rc;
10491
10492 req->flags = flags;
10493 req->port_id = cpu_to_le16(pf->port_id);
10494 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10495 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10496 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10497 return hwrm_req_send(bp, req);
10498 }
10499
10500 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10501 {
10502 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10503 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10504 struct hwrm_port_qstats_ext_output *resp_qs;
10505 struct hwrm_port_qstats_ext_input *req_qs;
10506 struct bnxt_pf_info *pf = &bp->pf;
10507 u32 tx_stat_size;
10508 int rc;
10509
10510 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10511 return 0;
10512
10513 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10514 return -EOPNOTSUPP;
10515
10516 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10517 if (rc)
10518 return rc;
10519
10520 req_qs->flags = flags;
10521 req_qs->port_id = cpu_to_le16(pf->port_id);
10522 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10523 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10524 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10525 sizeof(struct tx_port_stats_ext) : 0;
10526 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10527 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10528 resp_qs = hwrm_req_hold(bp, req_qs);
10529 rc = hwrm_req_send(bp, req_qs);
10530 if (!rc) {
10531 bp->fw_rx_stats_ext_size =
10532 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10533 if (BNXT_FW_MAJ(bp) < 220 &&
10534 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10535 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10536
10537 bp->fw_tx_stats_ext_size = tx_stat_size ?
10538 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10539 } else {
10540 bp->fw_rx_stats_ext_size = 0;
10541 bp->fw_tx_stats_ext_size = 0;
10542 }
10543 hwrm_req_drop(bp, req_qs);
10544
10545 if (flags)
10546 return rc;
10547
10548 if (bp->fw_tx_stats_ext_size <=
10549 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10550 bp->pri2cos_valid = 0;
10551 return rc;
10552 }
10553
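/* Query the priority to CoS queue mapping and record the per-port queue
 * index for each priority, so extended per-CoS counters can be reported
 * against the correct priority.
 */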
10554 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10555 if (rc)
10556 return rc;
10557
10558 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10559
10560 resp_qc = hwrm_req_hold(bp, req_qc);
10561 rc = hwrm_req_send(bp, req_qc);
10562 if (!rc) {
10563 u8 *pri2cos;
10564 int i, j;
10565
10566 pri2cos = &resp_qc->pri0_cos_queue_id;
10567 for (i = 0; i < 8; i++) {
10568 u8 queue_id = pri2cos[i];
10569 u8 queue_idx;
10570
10571 /* Per port queue IDs start from 0, 10, 20, etc */
10572 queue_idx = queue_id % 10;
10573 if (queue_idx > BNXT_MAX_QUEUE) {
10574 bp->pri2cos_valid = false;
10575 hwrm_req_drop(bp, req_qc);
10576 return rc;
10577 }
10578 for (j = 0; j < bp->max_q; j++) {
10579 if (bp->q_ids[j] == queue_id)
10580 bp->pri2cos_idx[i] = queue_idx;
10581 }
10582 }
10583 bp->pri2cos_valid = true;
10584 }
10585 hwrm_req_drop(bp, req_qc);
10586
10587 return rc;
10588 }
10589
10590 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10591 {
10592 bnxt_hwrm_tunnel_dst_port_free(bp,
10593 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10594 bnxt_hwrm_tunnel_dst_port_free(bp,
10595 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10596 }
10597
10598 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10599 {
10600 int rc, i;
10601 u32 tpa_flags = 0;
10602
10603 if (set_tpa)
10604 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10605 else if (BNXT_NO_FW_ACCESS(bp))
10606 return 0;
10607 for (i = 0; i < bp->nr_vnics; i++) {
10608 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10609 if (rc) {
10610 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10611 i, rc);
10612 return rc;
10613 }
10614 }
10615 return 0;
10616 }
10617
10618 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10619 {
10620 int i;
10621
10622 for (i = 0; i < bp->nr_vnics; i++)
10623 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10624 }
10625
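/* Tear down VNIC state in the required order: L2 filters first, then RSS
 * settings and contexts (before the VNIC free on pre-P5 chips, after it on
 * P5+), with TPA disabled before the VNIC itself is freed.
 */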
10626 static void bnxt_clear_vnic(struct bnxt *bp)
10627 {
10628 if (!bp->vnic_info)
10629 return;
10630
10631 bnxt_hwrm_clear_vnic_filter(bp);
10632 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10633 /* clear all RSS settings before freeing the vnic ctx */
10634 bnxt_hwrm_clear_vnic_rss(bp);
10635 bnxt_hwrm_vnic_ctx_free(bp);
10636 }
10637 /* before freeing the vnic, undo the vnic tpa settings */
10638 if (bp->flags & BNXT_FLAG_TPA)
10639 bnxt_set_tpa(bp, false);
10640 bnxt_hwrm_vnic_free(bp);
10641 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10642 bnxt_hwrm_vnic_ctx_free(bp);
10643 }
10644
10645 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10646 bool irq_re_init)
10647 {
10648 bnxt_clear_vnic(bp);
10649 bnxt_hwrm_ring_free(bp, close_path);
10650 bnxt_hwrm_ring_grp_free(bp);
10651 if (irq_re_init) {
10652 bnxt_hwrm_stat_ctx_free(bp);
10653 bnxt_hwrm_free_tunnel_ports(bp);
10654 }
10655 }
10656
10657 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10658 {
10659 struct hwrm_func_cfg_input *req;
10660 u8 evb_mode;
10661 int rc;
10662
10663 if (br_mode == BRIDGE_MODE_VEB)
10664 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10665 else if (br_mode == BRIDGE_MODE_VEPA)
10666 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10667 else
10668 return -EINVAL;
10669
10670 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10671 if (rc)
10672 return rc;
10673
10674 req->fid = cpu_to_le16(0xffff);
10675 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10676 req->evb_mode = evb_mode;
10677 return hwrm_req_send(bp, req);
10678 }
10679
10680 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10681 {
10682 struct hwrm_func_cfg_input *req;
10683 int rc;
10684
10685 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10686 return 0;
10687
10688 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10689 if (rc)
10690 return rc;
10691
10692 req->fid = cpu_to_le16(0xffff);
10693 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10694 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10695 if (size == 128)
10696 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10697
10698 return hwrm_req_send(bp, req);
10699 }
10700
10701 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10702 {
10703 int rc;
10704
10705 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10706 goto skip_rss_ctx;
10707
10708 /* allocate context for vnic */
10709 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10710 if (rc) {
10711 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10712 vnic->vnic_id, rc);
10713 goto vnic_setup_err;
10714 }
10715 bp->rsscos_nr_ctxs++;
10716
10717 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10718 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10719 if (rc) {
10720 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10721 vnic->vnic_id, rc);
10722 goto vnic_setup_err;
10723 }
10724 bp->rsscos_nr_ctxs++;
10725 }
10726
10727 skip_rss_ctx:
10728 /* configure default vnic, ring grp */
10729 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10730 if (rc) {
10731 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10732 vnic->vnic_id, rc);
10733 goto vnic_setup_err;
10734 }
10735
10736 /* Enable RSS hashing on vnic */
10737 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10738 if (rc) {
10739 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10740 vnic->vnic_id, rc);
10741 goto vnic_setup_err;
10742 }
10743
10744 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10745 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10746 if (rc) {
10747 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10748 vnic->vnic_id, rc);
10749 }
10750 }
10751
10752 vnic_setup_err:
10753 return rc;
10754 }
10755
10756 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10757 u8 valid)
10758 {
10759 struct hwrm_vnic_update_input *req;
10760 int rc;
10761
10762 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10763 if (rc)
10764 return rc;
10765
10766 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10767
10768 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10769 req->mru = cpu_to_le16(vnic->mru);
10770
10771 req->enables = cpu_to_le32(valid);
10772
10773 return hwrm_req_send(bp, req);
10774 }
10775
10776 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10777 {
10778 int rc;
10779
10780 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10781 if (rc) {
10782 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10783 vnic->vnic_id, rc);
10784 return rc;
10785 }
10786 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10787 if (rc)
10788 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10789 vnic->vnic_id, rc);
10790 return rc;
10791 }
10792
10793 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10794 {
10795 int rc, i, nr_ctxs;
10796
10797 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10798 for (i = 0; i < nr_ctxs; i++) {
10799 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10800 if (rc) {
10801 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10802 vnic->vnic_id, i, rc);
10803 break;
10804 }
10805 bp->rsscos_nr_ctxs++;
10806 }
10807 if (i < nr_ctxs)
10808 return -ENOMEM;
10809
10810 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10811 if (rc)
10812 return rc;
10813
10814 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10815 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10816 if (rc) {
10817 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10818 vnic->vnic_id, rc);
10819 }
10820 }
10821 return rc;
10822 }
10823
10824 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10825 {
10826 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10827 return __bnxt_setup_vnic_p5(bp, vnic);
10828 else
10829 return __bnxt_setup_vnic(bp, vnic);
10830 }
10831
10832 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10833 struct bnxt_vnic_info *vnic,
10834 u16 start_rx_ring_idx, int rx_rings)
10835 {
10836 int rc;
10837
10838 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10839 if (rc) {
10840 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10841 vnic->vnic_id, rc);
10842 return rc;
10843 }
10844 return bnxt_setup_vnic(bp, vnic);
10845 }
10846
10847 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10848 {
10849 struct bnxt_vnic_info *vnic;
10850 int i, rc = 0;
10851
10852 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10853 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10854 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10855 }
10856
10857 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10858 return 0;
10859
10860 for (i = 0; i < bp->rx_nr_rings; i++) {
10861 u16 vnic_id = i + 1;
10862 u16 ring_id = i;
10863
10864 if (vnic_id >= bp->nr_vnics)
10865 break;
10866
10867 vnic = &bp->vnic_info[vnic_id];
10868 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10869 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10870 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10871 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10872 break;
10873 }
10874 return rc;
10875 }
10876
10877 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10878 bool all)
10879 {
10880 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10881 struct bnxt_filter_base *usr_fltr, *tmp;
10882 struct bnxt_ntuple_filter *ntp_fltr;
10883 int i;
10884
10885 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10886 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10887 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10888 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10889 }
10890 if (!all)
10891 return;
10892
10893 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10894 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10895 usr_fltr->fw_vnic_id == rss_ctx->index) {
10896 ntp_fltr = container_of(usr_fltr,
10897 struct bnxt_ntuple_filter,
10898 base);
10899 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10900 bnxt_del_ntp_filter(bp, ntp_fltr);
10901 bnxt_del_one_usr_fltr(bp, usr_fltr);
10902 }
10903 }
10904
10905 if (vnic->rss_table)
10906 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10907 vnic->rss_table,
10908 vnic->rss_table_dma_addr);
10909 bp->num_rss_ctx--;
10910 }
10911
10912 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10913 int rxr_id)
10914 {
10915 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10916 int i, vnic_rx;
10917
10918 /* Ntuple VNIC always has all the rx rings. Any change of ring id
10919 * must be updated because a future filter may use it.
10920 */
10921 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10922 return true;
10923
10924 for (i = 0; i < tbl_size; i++) {
10925 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10926 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10927 else
10928 vnic_rx = bp->rss_indir_tbl[i];
10929
10930 if (rxr_id == vnic_rx)
10931 return true;
10932 }
10933
10934 return false;
10935 }
10936
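/* Update the MRU of a VNIC that includes RX ring @rxr_id in its RSS table.
 * A non-zero MRU re-programs the RSS table first; an MRU of 0 quiesces the
 * VNIC via the MRU update alone.
 */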
10937 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10938 u16 mru, int rxr_id)
10939 {
10940 int rc;
10941
10942 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10943 return 0;
10944
10945 if (mru) {
10946 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10947 if (rc) {
10948 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10949 vnic->vnic_id, rc);
10950 return rc;
10951 }
10952 }
10953 vnic->mru = mru;
10954 bnxt_hwrm_vnic_update(bp, vnic,
10955 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10956
10957 return 0;
10958 }
10959
10960 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10961 {
10962 struct ethtool_rxfh_context *ctx;
10963 unsigned long context;
10964 int rc;
10965
10966 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10967 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10968 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10969
10970 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10971 if (rc)
10972 return rc;
10973 }
10974
10975 return 0;
10976 }
10977
10978 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10979 {
10980 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10981 struct ethtool_rxfh_context *ctx;
10982 unsigned long context;
10983
10984 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10985 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10986 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10987
10988 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10989 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10990 __bnxt_setup_vnic_p5(bp, vnic)) {
10991 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10992 rss_ctx->index);
10993 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10994 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10995 }
10996 }
10997 }
10998
10999 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
11000 {
11001 struct ethtool_rxfh_context *ctx;
11002 unsigned long context;
11003
11004 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11005 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11006
11007 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11008 }
11009 }
11010
11011 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11012 static bool bnxt_promisc_ok(struct bnxt *bp)
11013 {
11014 #ifdef CONFIG_BNXT_SRIOV
11015 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11016 return false;
11017 #endif
11018 return true;
11019 }
11020
11021 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11022 {
11023 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11024 unsigned int rc = 0;
11025
11026 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11027 if (rc) {
11028 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11029 rc);
11030 return rc;
11031 }
11032
11033 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11034 if (rc) {
11035 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11036 rc);
11037 return rc;
11038 }
11039 return rc;
11040 }
11041
11042 static int bnxt_cfg_rx_mode(struct bnxt *);
11043 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
11044
11045 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11046 {
11047 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11048 int rc = 0;
11049 unsigned int rx_nr_rings = bp->rx_nr_rings;
11050
11051 if (irq_re_init) {
11052 rc = bnxt_hwrm_stat_ctx_alloc(bp);
11053 if (rc) {
11054 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11055 rc);
11056 goto err_out;
11057 }
11058 }
11059
11060 rc = bnxt_hwrm_ring_alloc(bp);
11061 if (rc) {
11062 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11063 goto err_out;
11064 }
11065
11066 rc = bnxt_hwrm_ring_grp_alloc(bp);
11067 if (rc) {
11068 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11069 goto err_out;
11070 }
11071
11072 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11073 rx_nr_rings--;
11074
11075 /* default vnic 0 */
11076 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11077 if (rc) {
11078 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11079 goto err_out;
11080 }
11081
11082 if (BNXT_VF(bp))
11083 bnxt_hwrm_func_qcfg(bp);
11084
11085 rc = bnxt_setup_vnic(bp, vnic);
11086 if (rc)
11087 goto err_out;
11088 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11089 bnxt_hwrm_update_rss_hash_cfg(bp);
11090
11091 if (bp->flags & BNXT_FLAG_RFS) {
11092 rc = bnxt_alloc_rfs_vnics(bp);
11093 if (rc)
11094 goto err_out;
11095 }
11096
11097 if (bp->flags & BNXT_FLAG_TPA) {
11098 rc = bnxt_set_tpa(bp, true);
11099 if (rc)
11100 goto err_out;
11101 }
11102
11103 if (BNXT_VF(bp))
11104 bnxt_update_vf_mac(bp);
11105
11106 /* Filter for default vnic 0 */
11107 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11108 if (rc) {
11109 if (BNXT_VF(bp) && rc == -ENODEV)
11110 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11111 else
11112 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11113 goto err_out;
11114 }
11115 vnic->uc_filter_count = 1;
11116
11117 vnic->rx_mask = 0;
11118 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11119 goto skip_rx_mask;
11120
11121 if (bp->dev->flags & IFF_BROADCAST)
11122 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11123
11124 if (bp->dev->flags & IFF_PROMISC)
11125 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11126
11127 if (bp->dev->flags & IFF_ALLMULTI) {
11128 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11129 vnic->mc_list_count = 0;
11130 } else if (bp->dev->flags & IFF_MULTICAST) {
11131 u32 mask = 0;
11132
11133 bnxt_mc_list_updated(bp, &mask);
11134 vnic->rx_mask |= mask;
11135 }
11136
11137 rc = bnxt_cfg_rx_mode(bp);
11138 if (rc)
11139 goto err_out;
11140
11141 skip_rx_mask:
11142 rc = bnxt_hwrm_set_coal(bp);
11143 if (rc)
11144 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11145 rc);
11146
11147 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11148 rc = bnxt_setup_nitroa0_vnic(bp);
11149 if (rc)
11150 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11151 rc);
11152 }
11153
11154 if (BNXT_VF(bp)) {
11155 bnxt_hwrm_func_qcfg(bp);
11156 netdev_update_features(bp->dev);
11157 }
11158
11159 return 0;
11160
11161 err_out:
11162 bnxt_hwrm_resource_free(bp, 0, true);
11163
11164 return rc;
11165 }
11166
11167 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11168 {
11169 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11170 return 0;
11171 }
11172
11173 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11174 {
11175 bnxt_init_cp_rings(bp);
11176 bnxt_init_rx_rings(bp);
11177 bnxt_init_tx_rings(bp);
11178 bnxt_init_ring_grps(bp, irq_re_init);
11179 bnxt_init_vnics(bp);
11180
11181 return bnxt_init_chip(bp, irq_re_init);
11182 }
11183
11184 static int bnxt_set_real_num_queues(struct bnxt *bp)
11185 {
11186 int rc;
11187 struct net_device *dev = bp->dev;
11188
11189 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11190 bp->tx_nr_rings_xdp);
11191 if (rc)
11192 return rc;
11193
11194 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11195 if (rc)
11196 return rc;
11197
11198 #ifdef CONFIG_RFS_ACCEL
11199 if (bp->flags & BNXT_FLAG_RFS)
11200 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11201 #endif
11202
11203 return rc;
11204 }
11205
11206 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11207 bool shared)
11208 {
11209 int _rx = *rx, _tx = *tx;
11210
11211 if (shared) {
11212 *rx = min_t(int, _rx, max);
11213 *tx = min_t(int, _tx, max);
11214 } else {
11215 if (max < 2)
11216 return -ENOMEM;
11217
11218 while (_rx + _tx > max) {
11219 if (_rx > _tx && _rx > 1)
11220 _rx--;
11221 else if (_tx > 1)
11222 _tx--;
11223 }
11224 *rx = _rx;
11225 *tx = _tx;
11226 }
11227 return 0;
11228 }
11229
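/* Convert a TX ring count to the number of TX completion rings required:
 * TX rings belonging to different TCs share a completion ring, while each
 * XDP TX ring needs its own.
 */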
11230 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11231 {
11232 return (tx - tx_xdp) / tx_sets + tx_xdp;
11233 }
11234
11235 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11236 {
11237 int tcs = bp->num_tc;
11238
11239 if (!tcs)
11240 tcs = 1;
11241 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11242 }
11243
11244 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11245 {
11246 int tcs = bp->num_tc;
11247
11248 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11249 bp->tx_nr_rings_xdp;
11250 }
11251
11252 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11253 bool sh)
11254 {
11255 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11256
11257 if (tx_cp != *tx) {
11258 int tx_saved = tx_cp, rc;
11259
11260 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11261 if (rc)
11262 return rc;
11263 if (tx_cp != tx_saved)
11264 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11265 return 0;
11266 }
11267 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11268 }
11269
11270 static void bnxt_setup_msix(struct bnxt *bp)
11271 {
11272 const int len = sizeof(bp->irq_tbl[0].name);
11273 struct net_device *dev = bp->dev;
11274 int tcs, i;
11275
11276 tcs = bp->num_tc;
11277 if (tcs) {
11278 int i, off, count;
11279
11280 for (i = 0; i < tcs; i++) {
11281 count = bp->tx_nr_rings_per_tc;
11282 off = BNXT_TC_TO_RING_BASE(bp, i);
11283 netdev_set_tc_queue(dev, i, count, off);
11284 }
11285 }
11286
11287 for (i = 0; i < bp->cp_nr_rings; i++) {
11288 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11289 char *attr;
11290
11291 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11292 attr = "TxRx";
11293 else if (i < bp->rx_nr_rings)
11294 attr = "rx";
11295 else
11296 attr = "tx";
11297
11298 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11299 attr, i);
11300 bp->irq_tbl[map_idx].handler = bnxt_msix;
11301 }
11302 }
11303
11304 static int bnxt_init_int_mode(struct bnxt *bp);
11305
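/* Grow or shrink the dynamically allocated MSI-X vectors to @total.
 * Returns the resulting bp->total_irqs, which may fall short of @total if a
 * new vector cannot be allocated.
 */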
11306 static int bnxt_change_msix(struct bnxt *bp, int total)
11307 {
11308 struct msi_map map;
11309 int i;
11310
11311 /* add MSIX to the end if needed */
11312 for (i = bp->total_irqs; i < total; i++) {
11313 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11314 if (map.index < 0)
11315 return bp->total_irqs;
11316 bp->irq_tbl[i].vector = map.virq;
11317 bp->total_irqs++;
11318 }
11319
11320 /* trim MSIX from the end if needed */
11321 for (i = bp->total_irqs; i > total; i--) {
11322 map.index = i - 1;
11323 map.virq = bp->irq_tbl[i - 1].vector;
11324 pci_msix_free_irq(bp->pdev, map);
11325 bp->total_irqs--;
11326 }
11327 return bp->total_irqs;
11328 }
11329
11330 static int bnxt_setup_int_mode(struct bnxt *bp)
11331 {
11332 int rc;
11333
11334 if (!bp->irq_tbl) {
11335 rc = bnxt_init_int_mode(bp);
11336 if (rc || !bp->irq_tbl)
11337 return rc ?: -ENODEV;
11338 }
11339
11340 bnxt_setup_msix(bp);
11341
11342 rc = bnxt_set_real_num_queues(bp);
11343 return rc;
11344 }
11345
11346 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11347 {
11348 return bp->hw_resc.max_rsscos_ctxs;
11349 }
11350
11351 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11352 {
11353 return bp->hw_resc.max_vnics;
11354 }
11355
11356 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11357 {
11358 return bp->hw_resc.max_stat_ctxs;
11359 }
11360
11361 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11362 {
11363 return bp->hw_resc.max_cp_rings;
11364 }
11365
11366 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11367 {
11368 unsigned int cp = bp->hw_resc.max_cp_rings;
11369
11370 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11371 cp -= bnxt_get_ulp_msix_num(bp);
11372
11373 return cp;
11374 }
11375
11376 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11377 {
11378 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11379
11380 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11381 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11382
11383 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11384 }
11385
11386 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11387 {
11388 bp->hw_resc.max_irqs = max_irqs;
11389 }
11390
11391 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11392 {
11393 unsigned int cp;
11394
11395 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11396 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11397 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11398 else
11399 return cp - bp->cp_nr_rings;
11400 }
11401
11402 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11403 {
11404 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11405 }
11406
11407 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11408 {
11409 int max_irq = bnxt_get_max_func_irqs(bp);
11410 int total_req = bp->cp_nr_rings + num;
11411
11412 if (max_irq < total_req) {
11413 num = max_irq - bp->cp_nr_rings;
11414 if (num <= 0)
11415 return 0;
11416 }
11417 return num;
11418 }
11419
11420 static int bnxt_get_num_msix(struct bnxt *bp)
11421 {
11422 if (!BNXT_NEW_RM(bp))
11423 return bnxt_get_max_func_irqs(bp);
11424
11425 return bnxt_nq_rings_in_use(bp);
11426 }
11427
11428 static int bnxt_init_int_mode(struct bnxt *bp)
11429 {
11430 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11431
11432 total_vecs = bnxt_get_num_msix(bp);
11433 max = bnxt_get_max_func_irqs(bp);
11434 if (total_vecs > max)
11435 total_vecs = max;
11436
11437 if (!total_vecs)
11438 return 0;
11439
11440 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11441 min = 2;
11442
11443 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11444 PCI_IRQ_MSIX);
11445 ulp_msix = bnxt_get_ulp_msix_num(bp);
11446 if (total_vecs < 0 || total_vecs < ulp_msix) {
11447 rc = -ENODEV;
11448 goto msix_setup_exit;
11449 }
11450
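/* When dynamic MSI-X allocation is supported, size the IRQ table for the
 * maximum so bnxt_change_msix() can add vectors later without reallocating
 * the table.
 */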
11451 tbl_size = total_vecs;
11452 if (pci_msix_can_alloc_dyn(bp->pdev))
11453 tbl_size = max;
11454 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11455 if (bp->irq_tbl) {
11456 for (i = 0; i < total_vecs; i++)
11457 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11458
11459 bp->total_irqs = total_vecs;
11460 /* Trim rings based upon num of vectors allocated */
11461 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11462 total_vecs - ulp_msix, min == 1);
11463 if (rc)
11464 goto msix_setup_exit;
11465
11466 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11467 bp->cp_nr_rings = (min == 1) ?
11468 max_t(int, tx_cp, bp->rx_nr_rings) :
11469 tx_cp + bp->rx_nr_rings;
11470
11471 } else {
11472 rc = -ENOMEM;
11473 goto msix_setup_exit;
11474 }
11475 return 0;
11476
11477 msix_setup_exit:
11478 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11479 kfree(bp->irq_tbl);
11480 bp->irq_tbl = NULL;
11481 pci_free_irq_vectors(bp->pdev);
11482 return rc;
11483 }
11484
11485 static void bnxt_clear_int_mode(struct bnxt *bp)
11486 {
11487 pci_free_irq_vectors(bp->pdev);
11488
11489 kfree(bp->irq_tbl);
11490 bp->irq_tbl = NULL;
11491 }
11492
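/* Reserve rings (and the matching IRQ vectors) with firmware.  The required
 * IRQ count includes any MSI-X wanted by the ULP driver; if the count
 * changes and vectors cannot be added dynamically, interrupts are torn down
 * and re-initialized around the reservation.
 */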
11493 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11494 {
11495 bool irq_cleared = false;
11496 bool irq_change = false;
11497 int tcs = bp->num_tc;
11498 int irqs_required;
11499 int rc;
11500
11501 if (!bnxt_need_reserve_rings(bp))
11502 return 0;
11503
11504 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11505 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11506
11507 if (ulp_msix > bp->ulp_num_msix_want)
11508 ulp_msix = bp->ulp_num_msix_want;
11509 irqs_required = ulp_msix + bp->cp_nr_rings;
11510 } else {
11511 irqs_required = bnxt_get_num_msix(bp);
11512 }
11513
11514 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11515 irq_change = true;
11516 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11517 bnxt_ulp_irq_stop(bp);
11518 bnxt_clear_int_mode(bp);
11519 irq_cleared = true;
11520 }
11521 }
11522 rc = __bnxt_reserve_rings(bp);
11523 if (irq_cleared) {
11524 if (!rc)
11525 rc = bnxt_init_int_mode(bp);
11526 bnxt_ulp_irq_restart(bp, rc);
11527 } else if (irq_change && !rc) {
11528 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11529 rc = -ENOSPC;
11530 }
11531 if (rc) {
11532 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11533 return rc;
11534 }
11535 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11536 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11537 netdev_err(bp->dev, "tx ring reservation failure\n");
11538 netdev_reset_tc(bp->dev);
11539 bp->num_tc = 0;
11540 if (bp->tx_nr_rings_xdp)
11541 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11542 else
11543 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11544 return -ENOMEM;
11545 }
11546 return 0;
11547 }
11548
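/* Quiesce the TX rings of queue @idx.  In TPH mode the hardware TX and
 * completion rings are also freed so that bnxt_tx_queue_start() can
 * recreate them.
 */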
11549 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11550 {
11551 struct bnxt_tx_ring_info *txr;
11552 struct netdev_queue *txq;
11553 struct bnxt_napi *bnapi;
11554 int i;
11555
11556 bnapi = bp->bnapi[idx];
11557 bnxt_for_each_napi_tx(i, bnapi, txr) {
11558 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11559 synchronize_net();
11560
11561 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11562 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11563 if (txq) {
11564 __netif_tx_lock_bh(txq);
11565 netif_tx_stop_queue(txq);
11566 __netif_tx_unlock_bh(txq);
11567 }
11568 }
11569
11570 if (!bp->tph_mode)
11571 continue;
11572
11573 bnxt_hwrm_tx_ring_free(bp, txr, true);
11574 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11575 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11576 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11577 }
11578 }
11579
11580 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11581 {
11582 struct bnxt_tx_ring_info *txr;
11583 struct netdev_queue *txq;
11584 struct bnxt_napi *bnapi;
11585 int rc, i;
11586
11587 bnapi = bp->bnapi[idx];
11588 /* All rings have been reserved and previously allocated.
11589 * Reallocating with the same parameters should never fail.
11590 */
11591 bnxt_for_each_napi_tx(i, bnapi, txr) {
11592 if (!bp->tph_mode)
11593 goto start_tx;
11594
11595 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11596 if (rc)
11597 return rc;
11598
11599 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11600 if (rc)
11601 return rc;
11602
11603 txr->tx_prod = 0;
11604 txr->tx_cons = 0;
11605 txr->tx_hw_cons = 0;
11606 start_tx:
11607 WRITE_ONCE(txr->dev_state, 0);
11608 synchronize_net();
11609
11610 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11611 continue;
11612
11613 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11614 if (txq)
11615 netif_tx_start_queue(txq);
11616 }
11617
11618 return 0;
11619 }
11620
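/* IRQ affinity notifier used in TPH mode: look up the steering tag for the
 * first CPU in the new mask, program it into the device's ST table, and
 * restart the RX queue so it starts using the new tag.
 */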
11621 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11622 const cpumask_t *mask)
11623 {
11624 struct bnxt_irq *irq;
11625 u16 tag;
11626 int err;
11627
11628 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11629
11630 if (!irq->bp->tph_mode)
11631 return;
11632
11633 cpumask_copy(irq->cpu_mask, mask);
11634
11635 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11636 return;
11637
11638 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11639 cpumask_first(irq->cpu_mask), &tag))
11640 return;
11641
11642 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11643 return;
11644
11645 netdev_lock(irq->bp->dev);
11646 if (netif_running(irq->bp->dev)) {
11647 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11648 if (err)
11649 netdev_err(irq->bp->dev,
11650 "RX queue restart failed: err=%d\n", err);
11651 }
11652 netdev_unlock(irq->bp->dev);
11653 }
11654
11655 static void bnxt_irq_affinity_release(struct kref *ref)
11656 {
11657 struct irq_affinity_notify *notify =
11658 container_of(ref, struct irq_affinity_notify, kref);
11659 struct bnxt_irq *irq;
11660
11661 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11662
11663 if (!irq->bp->tph_mode)
11664 return;
11665
11666 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11667 netdev_err(irq->bp->dev,
11668 "Setting ST=0 for MSIX entry %d failed\n",
11669 irq->msix_nr);
11670 return;
11671 }
11672 }
11673
11674 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11675 {
11676 irq_set_affinity_notifier(irq->vector, NULL);
11677 }
11678
11679 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11680 {
11681 struct irq_affinity_notify *notify;
11682
11683 irq->bp = bp;
11684
11685 /* Nothing to do if TPH is not enabled */
11686 if (!bp->tph_mode)
11687 return;
11688
11689 /* Register IRQ affinity notifier */
11690 notify = &irq->affinity_notify;
11691 notify->irq = irq->vector;
11692 notify->notify = bnxt_irq_affinity_notify;
11693 notify->release = bnxt_irq_affinity_release;
11694
11695 irq_set_affinity_notifier(irq->vector, notify);
11696 }
11697
11698 static void bnxt_free_irq(struct bnxt *bp)
11699 {
11700 struct bnxt_irq *irq;
11701 int i;
11702
11703 #ifdef CONFIG_RFS_ACCEL
11704 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11705 bp->dev->rx_cpu_rmap = NULL;
11706 #endif
11707 if (!bp->irq_tbl || !bp->bnapi)
11708 return;
11709
11710 for (i = 0; i < bp->cp_nr_rings; i++) {
11711 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11712
11713 irq = &bp->irq_tbl[map_idx];
11714 if (irq->requested) {
11715 if (irq->have_cpumask) {
11716 irq_update_affinity_hint(irq->vector, NULL);
11717 free_cpumask_var(irq->cpu_mask);
11718 irq->have_cpumask = 0;
11719 }
11720
11721 bnxt_release_irq_notifier(irq);
11722
11723 free_irq(irq->vector, bp->bnapi[i]);
11724 }
11725
11726 irq->requested = 0;
11727 }
11728
11729 /* Disable TPH support */
11730 pcie_disable_tph(bp->pdev);
11731 bp->tph_mode = 0;
11732 }
11733
11734 static int bnxt_request_irq(struct bnxt *bp)
11735 {
11736 struct cpu_rmap *rmap = NULL;
11737 int i, j, rc = 0;
11738 unsigned long flags = 0;
11739
11740 rc = bnxt_setup_int_mode(bp);
11741 if (rc) {
11742 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11743 rc);
11744 return rc;
11745 }
11746 #ifdef CONFIG_RFS_ACCEL
11747 rmap = bp->dev->rx_cpu_rmap;
11748 #endif
11749
11750 /* Enable TPH support as part of IRQ request */
11751 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11752 if (!rc)
11753 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11754
11755 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11756 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11757 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11758
11759 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11760 rmap && bp->bnapi[i]->rx_ring) {
11761 rc = irq_cpu_rmap_add(rmap, irq->vector);
11762 if (rc)
11763 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11764 j);
11765 j++;
11766 }
11767
11768 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11769 bp->bnapi[i]);
11770 if (rc)
11771 break;
11772
11773 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11774 irq->requested = 1;
11775
11776 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11777 int numa_node = dev_to_node(&bp->pdev->dev);
11778 u16 tag;
11779
11780 irq->have_cpumask = 1;
11781 irq->msix_nr = map_idx;
11782 irq->ring_nr = i;
11783 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11784 irq->cpu_mask);
11785 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11786 if (rc) {
11787 netdev_warn(bp->dev,
11788 "Update affinity hint failed, IRQ = %d\n",
11789 irq->vector);
11790 break;
11791 }
11792
11793 bnxt_register_irq_notifier(bp, irq);
11794
11795 /* Init ST table entry */
11796 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11797 cpumask_first(irq->cpu_mask),
11798 &tag))
11799 continue;
11800
11801 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11802 }
11803 }
11804 return rc;
11805 }
11806
11807 static void bnxt_del_napi(struct bnxt *bp)
11808 {
11809 int i;
11810
11811 if (!bp->bnapi)
11812 return;
11813
11814 for (i = 0; i < bp->rx_nr_rings; i++)
11815 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11816 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11817 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11818
11819 for (i = 0; i < bp->cp_nr_rings; i++) {
11820 struct bnxt_napi *bnapi = bp->bnapi[i];
11821
11822 __netif_napi_del_locked(&bnapi->napi);
11823 }
11824 /* We called __netif_napi_del_locked(), so we need to respect an RCU
11825 * grace period before freeing the napi structures.
11826 */
11827 synchronize_net();
11828 }
11829
11830 static void bnxt_init_napi(struct bnxt *bp)
11831 {
11832 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11833 unsigned int cp_nr_rings = bp->cp_nr_rings;
11834 struct bnxt_napi *bnapi;
11835 int i;
11836
11837 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11838 poll_fn = bnxt_poll_p5;
11839 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11840 cp_nr_rings--;
11841
11842 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11843
11844 for (i = 0; i < cp_nr_rings; i++) {
11845 bnapi = bp->bnapi[i];
11846 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11847 bnapi->index);
11848 }
11849 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11850 bnapi = bp->bnapi[cp_nr_rings];
11851 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11852 }
11853 }
11854
11855 static void bnxt_disable_napi(struct bnxt *bp)
11856 {
11857 int i;
11858
11859 if (!bp->bnapi ||
11860 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11861 return;
11862
11863 for (i = 0; i < bp->cp_nr_rings; i++) {
11864 struct bnxt_napi *bnapi = bp->bnapi[i];
11865 struct bnxt_cp_ring_info *cpr;
11866
11867 cpr = &bnapi->cp_ring;
11868 if (bnapi->tx_fault)
11869 cpr->sw_stats->tx.tx_resets++;
11870 if (bnapi->in_reset)
11871 cpr->sw_stats->rx.rx_resets++;
11872 napi_disable_locked(&bnapi->napi);
11873 }
11874 }
11875
11876 static void bnxt_enable_napi(struct bnxt *bp)
11877 {
11878 int i;
11879
11880 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11881 for (i = 0; i < bp->cp_nr_rings; i++) {
11882 struct bnxt_napi *bnapi = bp->bnapi[i];
11883 struct bnxt_cp_ring_info *cpr;
11884
11885 bnapi->tx_fault = 0;
11886
11887 cpr = &bnapi->cp_ring;
11888 bnapi->in_reset = false;
11889
11890 if (bnapi->rx_ring) {
11891 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11892 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11893 }
11894 napi_enable_locked(&bnapi->napi);
11895 }
11896 }
11897
11898 void bnxt_tx_disable(struct bnxt *bp)
11899 {
11900 int i;
11901 struct bnxt_tx_ring_info *txr;
11902
11903 if (bp->tx_ring) {
11904 for (i = 0; i < bp->tx_nr_rings; i++) {
11905 txr = &bp->tx_ring[i];
11906 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11907 }
11908 }
11909 /* Make sure napi polls see @dev_state change */
11910 synchronize_net();
11911 /* Drop carrier first to prevent TX timeout */
11912 netif_carrier_off(bp->dev);
11913 /* Stop all TX queues */
11914 netif_tx_disable(bp->dev);
11915 }
11916
11917 void bnxt_tx_enable(struct bnxt *bp)
11918 {
11919 int i;
11920 struct bnxt_tx_ring_info *txr;
11921
11922 for (i = 0; i < bp->tx_nr_rings; i++) {
11923 txr = &bp->tx_ring[i];
11924 WRITE_ONCE(txr->dev_state, 0);
11925 }
11926 /* Make sure napi polls see @dev_state change */
11927 synchronize_net();
11928 netif_tx_wake_all_queues(bp->dev);
11929 if (BNXT_LINK_IS_UP(bp))
11930 netif_carrier_on(bp->dev);
11931 }
11932
11933 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11934 {
11935 u8 active_fec = link_info->active_fec_sig_mode &
11936 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11937
11938 switch (active_fec) {
11939 default:
11940 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11941 return "None";
11942 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11943 return "Clause 74 BaseR";
11944 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11945 return "Clause 91 RS(528,514)";
11946 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11947 return "Clause 91 RS544_1XN";
11948 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11949 return "Clause 91 RS(544,514)";
11950 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11951 return "Clause 91 RS272_1XN";
11952 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11953 return "Clause 91 RS(272,257)";
11954 }
11955 }
11956
11957 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
11958 {
11959 u8 reason = link_info->link_down_reason;
11960
11961 /* Multiple bits can be set, we report 1 bit only in order of
11962 * priority.
11963 */
11964 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
11965 return "(Remote fault)";
11966 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
11967 return "(OTP Speed limit violation)";
11968 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
11969 return "(Cable removed)";
11970 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
11971 return "(Module fault)";
11972 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
11973 return "(BMC request down)";
11974 return "";
11975 }
11976
11977 void bnxt_report_link(struct bnxt *bp)
11978 {
11979 if (BNXT_LINK_IS_UP(bp)) {
11980 const char *signal = "";
11981 const char *flow_ctrl;
11982 const char *duplex;
11983 u32 speed;
11984 u16 fec;
11985
11986 netif_carrier_on(bp->dev);
11987 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11988 if (speed == SPEED_UNKNOWN) {
11989 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11990 return;
11991 }
11992 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11993 duplex = "full";
11994 else
11995 duplex = "half";
11996 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11997 flow_ctrl = "ON - receive & transmit";
11998 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11999 flow_ctrl = "ON - transmit";
12000 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
12001 flow_ctrl = "ON - receive";
12002 else
12003 flow_ctrl = "none";
12004 if (bp->link_info.phy_qcfg_resp.option_flags &
12005 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
12006 u8 sig_mode = bp->link_info.active_fec_sig_mode &
12007 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12008 switch (sig_mode) {
12009 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12010 signal = "(NRZ) ";
12011 break;
12012 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12013 signal = "(PAM4 56Gbps) ";
12014 break;
12015 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12016 signal = "(PAM4 112Gbps) ";
12017 break;
12018 default:
12019 break;
12020 }
12021 }
12022 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12023 speed, signal, duplex, flow_ctrl);
12024 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12025 netdev_info(bp->dev, "EEE is %s\n",
12026 bp->eee.eee_active ? "active" :
12027 "not active");
12028 fec = bp->link_info.fec_cfg;
12029 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12030 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12031 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12032 bnxt_report_fec(&bp->link_info));
12033 } else {
12034 char *str = bnxt_link_down_reason(&bp->link_info);
12035
12036 netif_carrier_off(bp->dev);
12037 netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12038 }
12039 }
12040
12041 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12042 {
12043 if (!resp->supported_speeds_auto_mode &&
12044 !resp->supported_speeds_force_mode &&
12045 !resp->supported_pam4_speeds_auto_mode &&
12046 !resp->supported_pam4_speeds_force_mode &&
12047 !resp->supported_speeds2_auto_mode &&
12048 !resp->supported_speeds2_force_mode)
12049 return true;
12050 return false;
12051 }
12052
12053 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12054 {
12055 struct bnxt_link_info *link_info = &bp->link_info;
12056 struct hwrm_port_phy_qcaps_output *resp;
12057 struct hwrm_port_phy_qcaps_input *req;
12058 int rc = 0;
12059
12060 if (bp->hwrm_spec_code < 0x10201)
12061 return 0;
12062
12063 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12064 if (rc)
12065 return rc;
12066
12067 resp = hwrm_req_hold(bp, req);
12068 rc = hwrm_req_send(bp, req);
12069 if (rc)
12070 goto hwrm_phy_qcaps_exit;
12071
12072 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12073 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12074 struct ethtool_keee *eee = &bp->eee;
12075 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12076
12077 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12078 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12079 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12080 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12081 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12082 }
12083
12084 if (bp->hwrm_spec_code >= 0x10a01) {
12085 if (bnxt_phy_qcaps_no_speed(resp)) {
12086 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12087 netdev_warn(bp->dev, "Ethernet link disabled\n");
12088 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12089 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12090 netdev_info(bp->dev, "Ethernet link enabled\n");
12091 /* Phy re-enabled, reprobe the speeds */
12092 link_info->support_auto_speeds = 0;
12093 link_info->support_pam4_auto_speeds = 0;
12094 link_info->support_auto_speeds2 = 0;
12095 }
12096 }
12097 if (resp->supported_speeds_auto_mode)
12098 link_info->support_auto_speeds =
12099 le16_to_cpu(resp->supported_speeds_auto_mode);
12100 if (resp->supported_pam4_speeds_auto_mode)
12101 link_info->support_pam4_auto_speeds =
12102 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12103 if (resp->supported_speeds2_auto_mode)
12104 link_info->support_auto_speeds2 =
12105 le16_to_cpu(resp->supported_speeds2_auto_mode);
12106
12107 bp->port_count = resp->port_cnt;
12108
12109 hwrm_phy_qcaps_exit:
12110 hwrm_req_drop(bp, req);
12111 return rc;
12112 }
12113
12114 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12115 {
12116 struct hwrm_port_mac_qcaps_output *resp;
12117 struct hwrm_port_mac_qcaps_input *req;
12118 int rc;
12119
12120 if (bp->hwrm_spec_code < 0x10a03)
12121 return;
12122
12123 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12124 if (rc)
12125 return;
12126
12127 resp = hwrm_req_hold(bp, req);
12128 rc = hwrm_req_send_silent(bp, req);
12129 if (!rc)
12130 bp->mac_flags = resp->flags;
12131 hwrm_req_drop(bp, req);
12132 }
12133
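/* Return true if @advertising contains any speed bit that is not set in
 * @supported.
 */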
12134 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12135 {
12136 u16 diff = advertising ^ supported;
12137
12138 return ((supported | diff) != supported);
12139 }
12140
12141 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12142 {
12143 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12144
12145 /* Check if any advertised speeds are no longer supported. The caller
12146 * holds the link_lock mutex, so we can modify link_info settings.
12147 */
12148 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12149 if (bnxt_support_dropped(link_info->advertising,
12150 link_info->support_auto_speeds2)) {
12151 link_info->advertising = link_info->support_auto_speeds2;
12152 return true;
12153 }
12154 return false;
12155 }
12156 if (bnxt_support_dropped(link_info->advertising,
12157 link_info->support_auto_speeds)) {
12158 link_info->advertising = link_info->support_auto_speeds;
12159 return true;
12160 }
12161 if (bnxt_support_dropped(link_info->advertising_pam4,
12162 link_info->support_pam4_auto_speeds)) {
12163 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12164 return true;
12165 }
12166 return false;
12167 }
12168
12169 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12170 {
12171 struct bnxt_link_info *link_info = &bp->link_info;
12172 struct hwrm_port_phy_qcfg_output *resp;
12173 struct hwrm_port_phy_qcfg_input *req;
12174 u8 link_state = link_info->link_state;
12175 bool support_changed;
12176 int rc;
12177
12178 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12179 if (rc)
12180 return rc;
12181
12182 resp = hwrm_req_hold(bp, req);
12183 rc = hwrm_req_send(bp, req);
12184 if (rc) {
12185 hwrm_req_drop(bp, req);
12186 if (BNXT_VF(bp) && rc == -ENODEV) {
12187 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12188 rc = 0;
12189 }
12190 return rc;
12191 }
12192
12193 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12194 link_info->phy_link_status = resp->link;
12195 link_info->duplex = resp->duplex_cfg;
12196 if (bp->hwrm_spec_code >= 0x10800)
12197 link_info->duplex = resp->duplex_state;
12198 link_info->pause = resp->pause;
12199 link_info->auto_mode = resp->auto_mode;
12200 link_info->auto_pause_setting = resp->auto_pause;
12201 link_info->lp_pause = resp->link_partner_adv_pause;
12202 link_info->force_pause_setting = resp->force_pause;
12203 link_info->duplex_setting = resp->duplex_cfg;
12204 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12205 link_info->link_speed = le16_to_cpu(resp->link_speed);
12206 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12207 link_info->active_lanes = resp->active_lanes;
12208 } else {
12209 link_info->link_speed = 0;
12210 link_info->active_lanes = 0;
12211 }
12212 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12213 link_info->force_pam4_link_speed =
12214 le16_to_cpu(resp->force_pam4_link_speed);
12215 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12216 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12217 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12218 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12219 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12220 link_info->auto_pam4_link_speeds =
12221 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12222 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12223 link_info->lp_auto_link_speeds =
12224 le16_to_cpu(resp->link_partner_adv_speeds);
12225 link_info->lp_auto_pam4_link_speeds =
12226 resp->link_partner_pam4_adv_speeds;
12227 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12228 link_info->phy_ver[0] = resp->phy_maj;
12229 link_info->phy_ver[1] = resp->phy_min;
12230 link_info->phy_ver[2] = resp->phy_bld;
12231 link_info->media_type = resp->media_type;
12232 link_info->phy_type = resp->phy_type;
12233 link_info->transceiver = resp->xcvr_pkg_type;
12234 link_info->phy_addr = resp->eee_config_phy_addr &
12235 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12236 link_info->module_status = resp->module_status;
12237 link_info->link_down_reason = resp->link_down_reason;
12238
12239 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12240 struct ethtool_keee *eee = &bp->eee;
12241 u16 fw_speeds;
12242
12243 eee->eee_active = 0;
12244 if (resp->eee_config_phy_addr &
12245 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12246 eee->eee_active = 1;
12247 fw_speeds = le16_to_cpu(
12248 resp->link_partner_adv_eee_link_speed_mask);
12249 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12250 }
12251
12252 /* Pull initial EEE config */
12253 if (!chng_link_state) {
12254 if (resp->eee_config_phy_addr &
12255 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12256 eee->eee_enabled = 1;
12257
12258 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12259 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12260
12261 if (resp->eee_config_phy_addr &
12262 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12263 __le32 tmr;
12264
12265 eee->tx_lpi_enabled = 1;
12266 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12267 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12268 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12269 }
12270 }
12271 }
12272
12273 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12274 if (bp->hwrm_spec_code >= 0x10504) {
12275 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12276 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12277 }
12278 /* TODO: need to add more logic to report VF link */
12279 if (chng_link_state) {
12280 if (link_info->phy_link_status == BNXT_LINK_LINK)
12281 link_info->link_state = BNXT_LINK_STATE_UP;
12282 else
12283 link_info->link_state = BNXT_LINK_STATE_DOWN;
12284 if (link_state != link_info->link_state)
12285 bnxt_report_link(bp);
12286 } else {
12287 /* always report link down if not required to update the link state */
12288 link_info->link_state = BNXT_LINK_STATE_DOWN;
12289 }
12290 hwrm_req_drop(bp, req);
12291
12292 if (!BNXT_PHY_CFG_ABLE(bp))
12293 return 0;
12294
12295 support_changed = bnxt_support_speed_dropped(link_info);
12296 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12297 bnxt_hwrm_set_link_setting(bp, true, false);
12298 return 0;
12299 }
12300
12301 static void bnxt_get_port_module_status(struct bnxt *bp)
12302 {
12303 struct bnxt_link_info *link_info = &bp->link_info;
12304 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12305 u8 module_status;
12306
12307 if (bnxt_update_link(bp, true))
12308 return;
12309
12310 module_status = link_info->module_status;
12311 switch (module_status) {
12312 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12313 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12314 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12315 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12316 bp->pf.port_id);
12317 if (bp->hwrm_spec_code >= 0x10201) {
12318 netdev_warn(bp->dev, "Module part number %s\n",
12319 resp->phy_vendor_partnumber);
12320 }
12321 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12322 netdev_warn(bp->dev, "TX is disabled\n");
12323 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12324 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12325 }
12326 }
12327
12328 static void
12329 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12330 {
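/* With autoneg flow control, advertise the requested RX/TX pause bits;
 * otherwise force them.  On spec 0x10201+ firmware the forced setting is
 * also copied into auto_pause, as seen in the else branch below.
 */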
12331 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12332 if (bp->hwrm_spec_code >= 0x10201)
12333 req->auto_pause =
12334 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12335 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12336 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12337 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12338 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12339 req->enables |=
12340 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12341 } else {
12342 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12343 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12344 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12345 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12346 req->enables |=
12347 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12348 if (bp->hwrm_spec_code >= 0x10201) {
12349 req->auto_pause = req->force_pause;
12350 req->enables |= cpu_to_le32(
12351 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12352 }
12353 }
12354 }
12355
12356 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12357 {
12358 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12359 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12360 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12361 req->enables |=
12362 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12363 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12364 } else if (bp->link_info.advertising) {
12365 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12366 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12367 }
12368 if (bp->link_info.advertising_pam4) {
12369 req->enables |=
12370 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12371 req->auto_link_pam4_speed_mask =
12372 cpu_to_le16(bp->link_info.advertising_pam4);
12373 }
12374 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12375 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12376 } else {
12377 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12378 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12379 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12380 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12381 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12382 (u32)bp->link_info.req_link_speed);
12383 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12384 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12385 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12386 } else {
12387 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12388 }
12389 }
12390
12391 /* tell chimp that the setting takes effect immediately */
12392 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12393 }
12394
12395 int bnxt_hwrm_set_pause(struct bnxt *bp)
12396 {
12397 struct hwrm_port_phy_cfg_input *req;
12398 int rc;
12399
12400 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12401 if (rc)
12402 return rc;
12403
12404 bnxt_hwrm_set_pause_common(bp, req);
12405
12406 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12407 bp->link_info.force_link_chng)
12408 bnxt_hwrm_set_link_common(bp, req);
12409
12410 rc = hwrm_req_send(bp, req);
12411 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12412 /* since changing the pause setting doesn't trigger any link
12413 * change event, the driver needs to update the current pause
12414 * result upon successful return of the phy_cfg command
12415 */
12416 bp->link_info.pause =
12417 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12418 bp->link_info.auto_pause_setting = 0;
12419 if (!bp->link_info.force_link_chng)
12420 bnxt_report_link(bp);
12421 }
12422 bp->link_info.force_link_chng = false;
12423 return rc;
12424 }
12425
12426 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12427 struct hwrm_port_phy_cfg_input *req)
12428 {
12429 struct ethtool_keee *eee = &bp->eee;
12430
12431 if (eee->eee_enabled) {
12432 u16 eee_speeds;
12433 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12434
12435 if (eee->tx_lpi_enabled)
12436 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12437 else
12438 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12439
12440 req->flags |= cpu_to_le32(flags);
12441 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12442 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12443 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12444 } else {
12445 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12446 }
12447 }
12448
12449 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12450 {
12451 struct hwrm_port_phy_cfg_input *req;
12452 int rc;
12453
12454 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12455 if (rc)
12456 return rc;
12457
12458 if (set_pause)
12459 bnxt_hwrm_set_pause_common(bp, req);
12460
12461 bnxt_hwrm_set_link_common(bp, req);
12462
12463 if (set_eee)
12464 bnxt_hwrm_set_eee(bp, req);
12465 return hwrm_req_send(bp, req);
12466 }
12467
12468 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12469 {
12470 struct hwrm_port_phy_cfg_input *req;
12471 int rc;
12472
12473 if (!BNXT_SINGLE_PF(bp))
12474 return 0;
12475
12476 if (pci_num_vf(bp->pdev) &&
12477 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12478 return 0;
12479
12480 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12481 if (rc)
12482 return rc;
12483
12484 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12485 rc = hwrm_req_send(bp, req);
12486 if (!rc) {
12487 mutex_lock(&bp->link_lock);
12488 /* The device is not obliged to bring the link down in certain
12489 * scenarios, even when forced. Setting the state unknown is consistent with
12490 * driver startup and will force link state to be reported
12491 * during subsequent open based on PORT_PHY_QCFG.
12492 */
12493 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12494 mutex_unlock(&bp->link_lock);
12495 }
12496 return rc;
12497 }
12498
12499 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12500 {
12501 #ifdef CONFIG_TEE_BNXT_FW
12502 int rc = tee_bnxt_fw_load();
12503
12504 if (rc)
12505 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12506
12507 return rc;
12508 #else
12509 netdev_err(bp->dev, "OP-TEE not supported\n");
12510 return -ENODEV;
12511 #endif
12512 }
12513
12514 static int bnxt_try_recover_fw(struct bnxt *bp)
12515 {
12516 if (bp->fw_health && bp->fw_health->status_reliable) {
12517 int retry = 0, rc;
12518 u32 sts;
12519
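/* Poll the firmware health register until it leaves the booting/recovering
 * states, the HWRM poll stops returning -EBUSY, or BNXT_FW_RETRY attempts
 * are exhausted, then check whether the firmware came up healthy.
 */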
12520 do {
12521 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12522 rc = bnxt_hwrm_poll(bp);
12523 if (!BNXT_FW_IS_BOOTING(sts) &&
12524 !BNXT_FW_IS_RECOVERING(sts))
12525 break;
12526 retry++;
12527 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12528
12529 if (!BNXT_FW_IS_HEALTHY(sts)) {
12530 netdev_err(bp->dev,
12531 "Firmware not responding, status: 0x%x\n",
12532 sts);
12533 rc = -ENODEV;
12534 }
12535 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12536 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12537 return bnxt_fw_reset_via_optee(bp);
12538 }
12539 return rc;
12540 }
12541
12542 return -ENODEV;
12543 }
12544
12545 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12546 {
12547 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12548
12549 if (!BNXT_NEW_RM(bp))
12550 return; /* no resource reservations required */
12551
12552 hw_resc->resv_cp_rings = 0;
12553 hw_resc->resv_stat_ctxs = 0;
12554 hw_resc->resv_irqs = 0;
12555 hw_resc->resv_tx_rings = 0;
12556 hw_resc->resv_rx_rings = 0;
12557 hw_resc->resv_hw_ring_grps = 0;
12558 hw_resc->resv_vnics = 0;
12559 hw_resc->resv_rsscos_ctxs = 0;
12560 if (!fw_reset) {
12561 bp->tx_nr_rings = 0;
12562 bp->rx_nr_rings = 0;
12563 }
12564 }
12565
12566 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12567 {
12568 int rc;
12569
12570 if (!BNXT_NEW_RM(bp))
12571 return 0; /* no resource reservations required */
12572
12573 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12574 if (rc)
12575 netdev_err(bp->dev, "resc_qcaps failed\n");
12576
12577 bnxt_clear_reservations(bp, fw_reset);
12578
12579 return rc;
12580 }
12581
12582 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12583 {
12584 struct hwrm_func_drv_if_change_output *resp;
12585 struct hwrm_func_drv_if_change_input *req;
12586 bool resc_reinit = false;
12587 bool caps_change = false;
12588 int rc, retry = 0;
12589 bool fw_reset;
12590 u32 flags = 0;
12591
12592 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12593 bp->fw_reset_state = 0;
12594
12595 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12596 return 0;
12597
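/* FUNC_DRV_IF_CHANGE tells the firmware the interface is going up or down.
 * The response flags report whether resources or capabilities changed, or
 * whether a hot firmware reset completed while the interface was down; in
 * those cases contexts are freed and the firmware re-initialized below
 * before ring reservations are re-established.
 */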
12598 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12599 if (rc)
12600 return rc;
12601
12602 if (up)
12603 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12604 resp = hwrm_req_hold(bp, req);
12605
12606 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12607 while (retry < BNXT_FW_IF_RETRY) {
12608 rc = hwrm_req_send(bp, req);
12609 if (rc != -EAGAIN)
12610 break;
12611
12612 msleep(50);
12613 retry++;
12614 }
12615
12616 if (rc == -EAGAIN) {
12617 hwrm_req_drop(bp, req);
12618 return rc;
12619 } else if (!rc) {
12620 flags = le32_to_cpu(resp->flags);
12621 } else if (up) {
12622 rc = bnxt_try_recover_fw(bp);
12623 fw_reset = true;
12624 }
12625 hwrm_req_drop(bp, req);
12626 if (rc)
12627 return rc;
12628
12629 if (!up) {
12630 bnxt_inv_fw_health_reg(bp);
12631 return 0;
12632 }
12633
12634 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12635 resc_reinit = true;
12636 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12637 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12638 fw_reset = true;
12639 else
12640 bnxt_remap_fw_health_regs(bp);
12641
12642 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12643 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12644 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12645 return -ENODEV;
12646 }
12647 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12648 caps_change = true;
12649
12650 if (resc_reinit || fw_reset || caps_change) {
12651 if (fw_reset || caps_change) {
12652 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12653 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12654 bnxt_ulp_irq_stop(bp);
12655 bnxt_free_ctx_mem(bp, false);
12656 bnxt_dcb_free(bp);
12657 rc = bnxt_fw_init_one(bp);
12658 if (rc) {
12659 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12660 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12661 return rc;
12662 }
12663 /* IRQ will be initialized later in bnxt_request_irq() */
12664 bnxt_clear_int_mode(bp);
12665 }
12666 rc = bnxt_cancel_reservations(bp, fw_reset);
12667 }
12668 return rc;
12669 }
12670
12671 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12672 {
12673 struct hwrm_port_led_qcaps_output *resp;
12674 struct hwrm_port_led_qcaps_input *req;
12675 struct bnxt_pf_info *pf = &bp->pf;
12676 int rc;
12677
12678 bp->num_leds = 0;
12679 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12680 return 0;
12681
12682 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12683 if (rc)
12684 return rc;
12685
12686 req->port_id = cpu_to_le16(pf->port_id);
12687 resp = hwrm_req_hold(bp, req);
12688 rc = hwrm_req_send(bp, req);
12689 if (rc) {
12690 hwrm_req_drop(bp, req);
12691 return rc;
12692 }
12693 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12694 int i;
12695
12696 bp->num_leds = resp->num_leds;
12697 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12698 bp->num_leds);
12699 for (i = 0; i < bp->num_leds; i++) {
12700 struct bnxt_led_info *led = &bp->leds[i];
12701 __le16 caps = led->led_state_caps;
12702
12703 if (!led->led_group_id ||
12704 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12705 bp->num_leds = 0;
12706 break;
12707 }
12708 }
12709 }
12710 hwrm_req_drop(bp, req);
12711 return 0;
12712 }
12713
12714 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12715 {
12716 struct hwrm_wol_filter_alloc_output *resp;
12717 struct hwrm_wol_filter_alloc_input *req;
12718 int rc;
12719
12720 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12721 if (rc)
12722 return rc;
12723
12724 req->port_id = cpu_to_le16(bp->pf.port_id);
12725 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12726 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12727 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12728
12729 resp = hwrm_req_hold(bp, req);
12730 rc = hwrm_req_send(bp, req);
12731 if (!rc)
12732 bp->wol_filter_id = resp->wol_filter_id;
12733 hwrm_req_drop(bp, req);
12734 return rc;
12735 }
12736
12737 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12738 {
12739 struct hwrm_wol_filter_free_input *req;
12740 int rc;
12741
12742 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12743 if (rc)
12744 return rc;
12745
12746 req->port_id = cpu_to_le16(bp->pf.port_id);
12747 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12748 req->wol_filter_id = bp->wol_filter_id;
12749
12750 return hwrm_req_send(bp, req);
12751 }
12752
12753 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12754 {
12755 struct hwrm_wol_filter_qcfg_output *resp;
12756 struct hwrm_wol_filter_qcfg_input *req;
12757 u16 next_handle = 0;
12758 int rc;
12759
12760 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12761 if (rc)
12762 return rc;
12763
12764 req->port_id = cpu_to_le16(bp->pf.port_id);
12765 req->handle = cpu_to_le16(handle);
12766 resp = hwrm_req_hold(bp, req);
12767 rc = hwrm_req_send(bp, req);
12768 if (!rc) {
12769 next_handle = le16_to_cpu(resp->next_handle);
12770 if (next_handle != 0) {
12771 if (resp->wol_type ==
12772 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12773 bp->wol = 1;
12774 bp->wol_filter_id = resp->wol_filter_id;
12775 }
12776 }
12777 }
12778 hwrm_req_drop(bp, req);
12779 return next_handle;
12780 }
12781
12782 static void bnxt_get_wol_settings(struct bnxt *bp)
12783 {
12784 u16 handle = 0;
12785
12786 bp->wol = 0;
12787 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12788 return;
12789
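/* Walk the firmware's WOL filter list: each query returns the next filter
 * handle, and a handle of 0 or 0xffff terminates the walk.
 */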
12790 do {
12791 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12792 } while (handle && handle != 0xffff);
12793 }
12794
12795 static bool bnxt_eee_config_ok(struct bnxt *bp)
12796 {
12797 struct ethtool_keee *eee = &bp->eee;
12798 struct bnxt_link_info *link_info = &bp->link_info;
12799
12800 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12801 return true;
12802
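/* EEE can only be advertised when speed autoneg is enabled, and the EEE
 * advertisement must be a subset of the autoneg advertisement; otherwise
 * trim it and report that the configuration needs updating.
 */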
12803 if (eee->eee_enabled) {
12804 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12805 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12806
12807 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12808
12809 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12810 eee->eee_enabled = 0;
12811 return false;
12812 }
12813 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12814 linkmode_and(eee->advertised, advertising,
12815 eee->supported);
12816 return false;
12817 }
12818 }
12819 return true;
12820 }
12821
12822 static int bnxt_update_phy_setting(struct bnxt *bp)
12823 {
12824 int rc;
12825 bool update_link = false;
12826 bool update_pause = false;
12827 bool update_eee = false;
12828 struct bnxt_link_info *link_info = &bp->link_info;
12829
12830 rc = bnxt_update_link(bp, true);
12831 if (rc) {
12832 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12833 rc);
12834 return rc;
12835 }
12836 if (!BNXT_SINGLE_PF(bp))
12837 return 0;
12838
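/* Compare the requested pause, speed and duplex settings against what the
 * firmware reported above and decide whether PORT_PHY_CFG must be sent.
 */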
12839 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12840 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12841 link_info->req_flow_ctrl)
12842 update_pause = true;
12843 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12844 link_info->force_pause_setting != link_info->req_flow_ctrl)
12845 update_pause = true;
12846 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12847 if (BNXT_AUTO_MODE(link_info->auto_mode))
12848 update_link = true;
12849 if (bnxt_force_speed_updated(link_info))
12850 update_link = true;
12851 if (link_info->req_duplex != link_info->duplex_setting)
12852 update_link = true;
12853 } else {
12854 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12855 update_link = true;
12856 if (bnxt_auto_speed_updated(link_info))
12857 update_link = true;
12858 }
12859
12860 /* The last close may have shut down the link, so we need to call
12861 * PHY_CFG to bring it back up.
12862 */
12863 if (!BNXT_LINK_IS_UP(bp))
12864 update_link = true;
12865
12866 if (!bnxt_eee_config_ok(bp))
12867 update_eee = true;
12868
12869 if (update_link)
12870 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12871 else if (update_pause)
12872 rc = bnxt_hwrm_set_pause(bp);
12873 if (rc) {
12874 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12875 rc);
12876 return rc;
12877 }
12878
12879 return rc;
12880 }
12881
12882 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12883
12884 static int bnxt_reinit_after_abort(struct bnxt *bp)
12885 {
12886 int rc;
12887
12888 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12889 return -EBUSY;
12890
12891 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12892 return -ENODEV;
12893
12894 rc = bnxt_fw_init_one(bp);
12895 if (!rc) {
12896 bnxt_clear_int_mode(bp);
12897 rc = bnxt_init_int_mode(bp);
12898 if (!rc) {
12899 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12900 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12901 }
12902 }
12903 return rc;
12904 }
12905
12906 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12907 {
12908 struct bnxt_ntuple_filter *ntp_fltr;
12909 struct bnxt_l2_filter *l2_fltr;
12910
12911 if (list_empty(&fltr->list))
12912 return;
12913
12914 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12915 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12916 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12917 atomic_inc(&l2_fltr->refcnt);
12918 ntp_fltr->l2_fltr = l2_fltr;
12919 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12920 bnxt_del_ntp_filter(bp, ntp_fltr);
12921 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12922 fltr->sw_id);
12923 }
12924 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12925 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12926 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12927 bnxt_del_l2_filter(bp, l2_fltr);
12928 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12929 fltr->sw_id);
12930 }
12931 }
12932 }
12933
12934 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12935 {
12936 struct bnxt_filter_base *usr_fltr, *tmp;
12937
12938 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12939 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12940 }
12941
12942 static int bnxt_set_xps_mapping(struct bnxt *bp)
12943 {
12944 int numa_node = dev_to_node(&bp->pdev->dev);
12945 unsigned int q_idx, map_idx, cpu, i;
12946 const struct cpumask *cpu_mask_ptr;
12947 int nr_cpus = num_online_cpus();
12948 cpumask_t *q_map;
12949 int rc = 0;
12950
12951 q_map = kzalloc_objs(*q_map, bp->tx_nr_rings_per_tc);
12952 if (!q_map)
12953 return -ENOMEM;
12954
12955 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12956 * Each TC has the same number of TX queues. The nth TX queue for each
12957 * TC will have the same CPU mask.
12958 */
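/* E.g. with 8 online CPUs and 4 TX rings per TC, ring n of every TC is
 * given the CPUs that cpumask_local_spread() returns for indices n and
 * n + 4.
 */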
12959 for (i = 0; i < nr_cpus; i++) {
12960 map_idx = i % bp->tx_nr_rings_per_tc;
12961 cpu = cpumask_local_spread(i, numa_node);
12962 cpu_mask_ptr = get_cpu_mask(cpu);
12963 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12964 }
12965
12966 /* Register CPU mask for each TX queue except the ones marked for XDP */
12967 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12968 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12969 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12970 if (rc) {
12971 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12972 q_idx);
12973 break;
12974 }
12975 }
12976
12977 kfree(q_map);
12978
12979 return rc;
12980 }
12981
12982 static int bnxt_tx_nr_rings(struct bnxt *bp)
12983 {
12984 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12985 bp->tx_nr_rings_per_tc;
12986 }
12987
12988 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12989 {
12990 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12991 }
12992
12993 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12994 {
12995 int rc = 0;
12996
12997 netif_carrier_off(bp->dev);
12998 if (irq_re_init) {
12999 /* Reserve rings now if none were reserved at driver probe. */
13000 rc = bnxt_init_dflt_ring_mode(bp);
13001 if (rc) {
13002 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
13003 return rc;
13004 }
13005 }
13006 rc = bnxt_reserve_rings(bp, irq_re_init);
13007 if (rc)
13008 return rc;
13009
13010 /* Make adjustments if reserved TX rings are less than requested */
13011 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13012 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13013 if (bp->tx_nr_rings_xdp) {
13014 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13015 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13016 }
13017 rc = bnxt_alloc_mem(bp, irq_re_init);
13018 if (rc) {
13019 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13020 goto open_err_free_mem;
13021 }
13022
13023 if (irq_re_init) {
13024 bnxt_init_napi(bp);
13025 rc = bnxt_request_irq(bp);
13026 if (rc) {
13027 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13028 goto open_err_irq;
13029 }
13030 }
13031
13032 rc = bnxt_init_nic(bp, irq_re_init);
13033 if (rc) {
13034 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13035 goto open_err_irq;
13036 }
13037
13038 bnxt_enable_napi(bp);
13039 bnxt_debug_dev_init(bp);
13040
13041 if (link_re_init) {
13042 mutex_lock(&bp->link_lock);
13043 rc = bnxt_update_phy_setting(bp);
13044 mutex_unlock(&bp->link_lock);
13045 if (rc) {
13046 netdev_warn(bp->dev, "failed to update phy settings\n");
13047 if (BNXT_SINGLE_PF(bp)) {
13048 bp->link_info.phy_retry = true;
13049 bp->link_info.phy_retry_expires =
13050 jiffies + 5 * HZ;
13051 }
13052 }
13053 }
13054
13055 if (irq_re_init) {
13056 udp_tunnel_nic_reset_ntf(bp->dev);
13057 rc = bnxt_set_xps_mapping(bp);
13058 if (rc)
13059 netdev_warn(bp->dev, "failed to set xps mapping\n");
13060 }
13061
13062 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13063 if (!static_key_enabled(&bnxt_xdp_locking_key))
13064 static_branch_enable(&bnxt_xdp_locking_key);
13065 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13066 static_branch_disable(&bnxt_xdp_locking_key);
13067 }
13068 set_bit(BNXT_STATE_OPEN, &bp->state);
13069 bnxt_enable_int(bp);
13070 /* Enable TX queues */
13071 bnxt_tx_enable(bp);
13072 mod_timer(&bp->timer, jiffies + bp->current_interval);
13073 /* Poll link status and check for SFP+ module status */
13074 mutex_lock(&bp->link_lock);
13075 bnxt_get_port_module_status(bp);
13076 mutex_unlock(&bp->link_lock);
13077
13078 /* VF-reps may need to be re-opened after the PF is re-opened */
13079 if (BNXT_PF(bp))
13080 bnxt_vf_reps_open(bp);
13081 bnxt_ptp_init_rtc(bp, true);
13082 bnxt_ptp_cfg_tstamp_filters(bp);
13083 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13084 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13085 bnxt_cfg_usr_fltrs(bp);
13086 return 0;
13087
13088 open_err_irq:
13089 bnxt_del_napi(bp);
13090
13091 open_err_free_mem:
13092 bnxt_free_skbs(bp);
13093 bnxt_free_irq(bp);
13094 bnxt_free_mem(bp, true);
13095 return rc;
13096 }
13097
13098 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13099 {
13100 int rc = 0;
13101
13102 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13103 rc = -EIO;
13104 if (!rc)
13105 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13106 if (rc) {
13107 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13108 netif_close(bp->dev);
13109 }
13110 return rc;
13111 }
13112
13113 /* netdev instance lock held, open the NIC half way by allocating all
13114 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13115 * for offline self tests.
13116 */
13117 int bnxt_half_open_nic(struct bnxt *bp)
13118 {
13119 int rc = 0;
13120
13121 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13122 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13123 rc = -ENODEV;
13124 goto half_open_err;
13125 }
13126
13127 rc = bnxt_alloc_mem(bp, true);
13128 if (rc) {
13129 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13130 goto half_open_err;
13131 }
13132 bnxt_init_napi(bp);
13133 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13134 rc = bnxt_init_nic(bp, true);
13135 if (rc) {
13136 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13137 bnxt_del_napi(bp);
13138 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13139 goto half_open_err;
13140 }
13141 return 0;
13142
13143 half_open_err:
13144 bnxt_free_skbs(bp);
13145 bnxt_free_mem(bp, true);
13146 netif_close(bp->dev);
13147 return rc;
13148 }
13149
13150 /* netdev instance lock held, this call can only be made after a previous
13151 * successful call to bnxt_half_open_nic().
13152 */
13153 void bnxt_half_close_nic(struct bnxt *bp)
13154 {
13155 bnxt_hwrm_resource_free(bp, false, true);
13156 bnxt_del_napi(bp);
13157 bnxt_free_skbs(bp);
13158 bnxt_free_mem(bp, true);
13159 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13160 }
13161
13162 void bnxt_reenable_sriov(struct bnxt *bp)
13163 {
13164 if (BNXT_PF(bp)) {
13165 struct bnxt_pf_info *pf = &bp->pf;
13166 int n = pf->active_vfs;
13167
13168 if (n)
13169 bnxt_cfg_hw_sriov(bp, &n, true);
13170 }
13171 }
13172
13173 static int bnxt_open(struct net_device *dev)
13174 {
13175 struct bnxt *bp = netdev_priv(dev);
13176 int rc;
13177
13178 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13179 rc = bnxt_reinit_after_abort(bp);
13180 if (rc) {
13181 if (rc == -EBUSY)
13182 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13183 else
13184 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13185 return -ENODEV;
13186 }
13187 }
13188
13189 rc = bnxt_hwrm_if_change(bp, true);
13190 if (rc)
13191 return rc;
13192
13193 rc = __bnxt_open_nic(bp, true, true);
13194 if (rc) {
13195 bnxt_hwrm_if_change(bp, false);
13196 } else {
13197 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13198 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13199 bnxt_queue_sp_work(bp,
13200 BNXT_RESTART_ULP_SP_EVENT);
13201 }
13202 }
13203
13204 return rc;
13205 }
13206
13207 static bool bnxt_drv_busy(struct bnxt *bp)
13208 {
13209 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13210 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13211 }
13212
13213 static void bnxt_get_ring_stats(struct bnxt *bp,
13214 struct rtnl_link_stats64 *stats);
13215
13216 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13217 bool link_re_init)
13218 {
13219 /* Close the VF-reps before closing PF */
13220 if (BNXT_PF(bp))
13221 bnxt_vf_reps_close(bp);
13222
13223 /* Change device state to avoid TX queue wake-ups */
13224 bnxt_tx_disable(bp);
13225
13226 clear_bit(BNXT_STATE_OPEN, &bp->state);
13227 smp_mb__after_atomic();
13228 while (bnxt_drv_busy(bp))
13229 msleep(20);
13230
13231 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13232 bnxt_clear_rss_ctxs(bp);
13233 /* Flush rings and disable interrupts */
13234 bnxt_shutdown_nic(bp, irq_re_init);
13235
13236 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13237
13238 bnxt_debug_dev_exit(bp);
13239 bnxt_disable_napi(bp);
13240 timer_delete_sync(&bp->timer);
13241 bnxt_free_skbs(bp);
13242
13243 /* Save ring stats before shutdown */
13244 if (bp->bnapi && irq_re_init) {
13245 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13246 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13247 }
13248 if (irq_re_init) {
13249 bnxt_free_irq(bp);
13250 bnxt_del_napi(bp);
13251 }
13252 bnxt_free_mem(bp, irq_re_init);
13253 }
13254
13255 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13256 {
13257 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13258 /* If we get here, it means firmware reset is in progress
13259 * while we are trying to close. We can safely proceed with
13260 * the close because we are holding netdev instance lock.
13261 * Some firmware messages may fail as we proceed to close.
13262 * We set the ABORT_ERR flag here so that the FW reset thread
13263 * will later abort when it gets the netdev instance lock
13264 * and sees the flag.
13265 */
13266 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13267 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13268 }
13269
13270 #ifdef CONFIG_BNXT_SRIOV
13271 if (bp->sriov_cfg) {
13272 int rc;
13273
13274 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13275 !bp->sriov_cfg,
13276 BNXT_SRIOV_CFG_WAIT_TMO);
13277 if (!rc)
13278 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13279 else if (rc < 0)
13280 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13281 }
13282 #endif
13283 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13284 }
13285
13286 static int bnxt_close(struct net_device *dev)
13287 {
13288 struct bnxt *bp = netdev_priv(dev);
13289
13290 bnxt_close_nic(bp, true, true);
13291 bnxt_hwrm_shutdown_link(bp);
13292 bnxt_hwrm_if_change(bp, false);
13293 return 0;
13294 }
13295
13296 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13297 u16 *val)
13298 {
13299 struct hwrm_port_phy_mdio_read_output *resp;
13300 struct hwrm_port_phy_mdio_read_input *req;
13301 int rc;
13302
13303 if (bp->hwrm_spec_code < 0x10a00)
13304 return -EOPNOTSUPP;
13305
13306 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13307 if (rc)
13308 return rc;
13309
13310 req->port_id = cpu_to_le16(bp->pf.port_id);
13311 req->phy_addr = phy_addr;
13312 req->reg_addr = cpu_to_le16(reg & 0x1f);
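/* For Clause 45 PHY IDs, unpack the port (prtad) and device (devad)
 * addresses from phy_addr and use the full 16-bit register address
 * instead of the 5-bit Clause 22 address masked above.
 */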
13313 if (mdio_phy_id_is_c45(phy_addr)) {
13314 req->cl45_mdio = 1;
13315 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13316 req->dev_addr = mdio_phy_id_devad(phy_addr);
13317 req->reg_addr = cpu_to_le16(reg);
13318 }
13319
13320 resp = hwrm_req_hold(bp, req);
13321 rc = hwrm_req_send(bp, req);
13322 if (!rc)
13323 *val = le16_to_cpu(resp->reg_data);
13324 hwrm_req_drop(bp, req);
13325 return rc;
13326 }
13327
13328 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13329 u16 val)
13330 {
13331 struct hwrm_port_phy_mdio_write_input *req;
13332 int rc;
13333
13334 if (bp->hwrm_spec_code < 0x10a00)
13335 return -EOPNOTSUPP;
13336
13337 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13338 if (rc)
13339 return rc;
13340
13341 req->port_id = cpu_to_le16(bp->pf.port_id);
13342 req->phy_addr = phy_addr;
13343 req->reg_addr = cpu_to_le16(reg & 0x1f);
13344 if (mdio_phy_id_is_c45(phy_addr)) {
13345 req->cl45_mdio = 1;
13346 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13347 req->dev_addr = mdio_phy_id_devad(phy_addr);
13348 req->reg_addr = cpu_to_le16(reg);
13349 }
13350 req->reg_data = cpu_to_le16(val);
13351
13352 return hwrm_req_send(bp, req);
13353 }
13354
13355 /* netdev instance lock held */
13356 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13357 {
13358 struct mii_ioctl_data *mdio = if_mii(ifr);
13359 struct bnxt *bp = netdev_priv(dev);
13360 int rc;
13361
13362 switch (cmd) {
13363 case SIOCGMIIPHY:
13364 mdio->phy_id = bp->link_info.phy_addr;
13365
13366 fallthrough;
13367 case SIOCGMIIREG: {
13368 u16 mii_regval = 0;
13369
13370 if (!netif_running(dev))
13371 return -EAGAIN;
13372
13373 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13374 &mii_regval);
13375 mdio->val_out = mii_regval;
13376 return rc;
13377 }
13378
13379 case SIOCSMIIREG:
13380 if (!netif_running(dev))
13381 return -EAGAIN;
13382
13383 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13384 mdio->val_in);
13385
13386 default:
13387 /* do nothing */
13388 break;
13389 }
13390 return -EOPNOTSUPP;
13391 }
13392
13393 static void bnxt_get_ring_stats(struct bnxt *bp,
13394 struct rtnl_link_stats64 *stats)
13395 {
13396 int i;
13397
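/* Aggregate the per-completion-ring hardware counters into the netdev
 * totals; hardware RX discards are reported as rx_missed_errors and TX
 * error packets as tx_dropped.
 */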
13398 for (i = 0; i < bp->cp_nr_rings; i++) {
13399 struct bnxt_napi *bnapi = bp->bnapi[i];
13400 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13401 u64 *sw = cpr->stats.sw_stats;
13402
13403 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13404 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13405 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13406
13407 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13408 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13409 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13410
13411 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13412 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13413 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13414
13415 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13416 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13417 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13418
13419 stats->rx_missed_errors +=
13420 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13421
13422 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13423
13424 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13425
13426 stats->rx_dropped +=
13427 cpr->sw_stats->rx.rx_netpoll_discards +
13428 cpr->sw_stats->rx.rx_oom_discards;
13429 }
13430 }
13431
13432 static void bnxt_add_prev_stats(struct bnxt *bp,
13433 struct rtnl_link_stats64 *stats)
13434 {
13435 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13436
13437 stats->rx_packets += prev_stats->rx_packets;
13438 stats->tx_packets += prev_stats->tx_packets;
13439 stats->rx_bytes += prev_stats->rx_bytes;
13440 stats->tx_bytes += prev_stats->tx_bytes;
13441 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13442 stats->multicast += prev_stats->multicast;
13443 stats->rx_dropped += prev_stats->rx_dropped;
13444 stats->tx_dropped += prev_stats->tx_dropped;
13445 }
13446
13447 static void
13448 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13449 {
13450 struct bnxt *bp = netdev_priv(dev);
13451
13452 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13453 /* Make sure bnxt_close_nic() sees that we are reading stats before
13454 * we check the BNXT_STATE_OPEN flag.
13455 */
13456 smp_mb__after_atomic();
13457 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13458 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13459 *stats = bp->net_stats_prev;
13460 return;
13461 }
13462
13463 bnxt_get_ring_stats(bp, stats);
13464 bnxt_add_prev_stats(bp, stats);
13465
13466 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13467 u64 *rx = bp->port_stats.sw_stats;
13468 u64 *tx = bp->port_stats.sw_stats +
13469 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13470
13471 stats->rx_crc_errors =
13472 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13473 stats->rx_frame_errors =
13474 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13475 stats->rx_length_errors =
13476 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13477 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13478 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13479 stats->rx_errors =
13480 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13481 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13482 stats->collisions =
13483 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13484 stats->tx_fifo_errors =
13485 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13486 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13487 }
13488 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13489 }
13490
13491 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13492 struct bnxt_total_ring_err_stats *stats,
13493 struct bnxt_cp_ring_info *cpr)
13494 {
13495 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13496 u64 *hw_stats = cpr->stats.sw_stats;
13497
13498 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13499 stats->rx_total_resets += sw_stats->rx.rx_resets;
13500 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13501 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13502 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13503 stats->rx_total_ring_discards +=
13504 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13505 stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13506 stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13507 stats->tx_total_resets += sw_stats->tx.tx_resets;
13508 stats->tx_total_ring_discards +=
13509 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13510 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13511 }
13512
13513 void bnxt_get_ring_err_stats(struct bnxt *bp,
13514 struct bnxt_total_ring_err_stats *stats)
13515 {
13516 int i;
13517
13518 for (i = 0; i < bp->cp_nr_rings; i++)
13519 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13520 }
13521
13522 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13523 {
13524 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13525 struct net_device *dev = bp->dev;
13526 struct netdev_hw_addr *ha;
13527 u8 *haddr;
13528 int mc_count = 0;
13529 bool update = false;
13530 int off = 0;
13531
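/* Copy the device's multicast list into the VNIC's list, falling back to
 * ALL_MCAST if it exceeds BNXT_MAX_MC_ADDRS.  Return true if the list
 * changed and the RX mask must be resent to the firmware.
 */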
13532 netdev_for_each_mc_addr(ha, dev) {
13533 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13534 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13535 vnic->mc_list_count = 0;
13536 return false;
13537 }
13538 haddr = ha->addr;
13539 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13540 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13541 update = true;
13542 }
13543 off += ETH_ALEN;
13544 mc_count++;
13545 }
13546 if (mc_count)
13547 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13548
13549 if (mc_count != vnic->mc_list_count) {
13550 vnic->mc_list_count = mc_count;
13551 update = true;
13552 }
13553 return update;
13554 }
13555
13556 static bool bnxt_uc_list_updated(struct bnxt *bp)
13557 {
13558 struct net_device *dev = bp->dev;
13559 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13560 struct netdev_hw_addr *ha;
13561 int off = 0;
13562
13563 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13564 return true;
13565
13566 netdev_for_each_uc_addr(ha, dev) {
13567 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13568 return true;
13569
13570 off += ETH_ALEN;
13571 }
13572 return false;
13573 }
13574
13575 static void bnxt_set_rx_mode(struct net_device *dev)
13576 {
13577 struct bnxt *bp = netdev_priv(dev);
13578 struct bnxt_vnic_info *vnic;
13579 bool mc_update = false;
13580 bool uc_update;
13581 u32 mask;
13582
13583 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13584 return;
13585
13586 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13587 mask = vnic->rx_mask;
13588 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13589 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13590 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13591 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13592
13593 if (dev->flags & IFF_PROMISC)
13594 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13595
13596 uc_update = bnxt_uc_list_updated(bp);
13597
13598 if (dev->flags & IFF_BROADCAST)
13599 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13600 if (dev->flags & IFF_ALLMULTI) {
13601 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13602 vnic->mc_list_count = 0;
13603 } else if (dev->flags & IFF_MULTICAST) {
13604 mc_update = bnxt_mc_list_updated(bp, &mask);
13605 }
13606
13607 if (mask != vnic->rx_mask || uc_update || mc_update) {
13608 vnic->rx_mask = mask;
13609
13610 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13611 }
13612 }
13613
13614 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13615 {
13616 struct net_device *dev = bp->dev;
13617 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13618 struct netdev_hw_addr *ha;
13619 int i, off = 0, rc;
13620 bool uc_update;
13621
13622 netif_addr_lock_bh(dev);
13623 uc_update = bnxt_uc_list_updated(bp);
13624 netif_addr_unlock_bh(dev);
13625
13626 if (!uc_update)
13627 goto skip_uc;
13628
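/* Filter 0 is the default MAC address filter; free and rebuild only the
 * user-added unicast filters starting at index 1.
 */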
13629 for (i = 1; i < vnic->uc_filter_count; i++) {
13630 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13631
13632 bnxt_hwrm_l2_filter_free(bp, fltr);
13633 bnxt_del_l2_filter(bp, fltr);
13634 }
13635
13636 vnic->uc_filter_count = 1;
13637
13638 netif_addr_lock_bh(dev);
13639 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13640 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13641 } else {
13642 netdev_for_each_uc_addr(ha, dev) {
13643 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13644 off += ETH_ALEN;
13645 vnic->uc_filter_count++;
13646 }
13647 }
13648 netif_addr_unlock_bh(dev);
13649
13650 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13651 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13652 if (rc) {
13653 if (BNXT_VF(bp) && rc == -ENODEV) {
13654 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13655 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13656 else
13657 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13658 rc = 0;
13659 } else {
13660 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13661 }
13662 vnic->uc_filter_count = i;
13663 return rc;
13664 }
13665 }
13666 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13667 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13668
13669 skip_uc:
13670 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13671 !bnxt_promisc_ok(bp))
13672 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13673 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13674 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13675 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13676 rc);
13677 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13678 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13679 vnic->mc_list_count = 0;
13680 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13681 }
13682 if (rc)
13683 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13684 rc);
13685
13686 return rc;
13687 }
13688
13689 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13690 {
13691 #ifdef CONFIG_BNXT_SRIOV
13692 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13693 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13694
13695 /* No minimum rings were provisioned by the PF. Don't
13696 * reserve rings by default when device is down.
13697 */
13698 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13699 return true;
13700
13701 if (!netif_running(bp->dev))
13702 return false;
13703 }
13704 #endif
13705 return true;
13706 }
13707
13708 /* If the chip and firmware support RFS */
13709 static bool bnxt_rfs_supported(struct bnxt *bp)
13710 {
13711 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13712 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13713 return true;
13714 return false;
13715 }
13716 /* 212 firmware is broken for aRFS */
13717 if (BNXT_FW_MAJ(bp) == 212)
13718 return false;
13719 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13720 return true;
13721 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13722 return true;
13723 return false;
13724 }
13725
13726 /* If runtime conditions support RFS */
13727 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13728 {
13729 struct bnxt_hw_rings hwr = {0};
13730 int max_vnics, max_rss_ctxs;
13731
13732 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13733 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13734 return bnxt_rfs_supported(bp);
13735
13736 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13737 return false;
13738
13739 hwr.grp = bp->rx_nr_rings;
13740 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13741 if (new_rss_ctx)
13742 hwr.vnic++;
13743 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13744 max_vnics = bnxt_get_max_func_vnics(bp);
13745 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13746
13747 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13748 if (bp->rx_nr_rings > 1)
13749 netdev_warn(bp->dev,
13750 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13751 min(max_rss_ctxs - 1, max_vnics - 1));
13752 return false;
13753 }
13754
13755 if (!BNXT_NEW_RM(bp))
13756 return true;
13757
13758 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13759 * issue that will mess up the default VNIC if we reduce the
13760 * reservations.
13761 */
13762 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13763 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13764 return true;
13765
13766 bnxt_hwrm_reserve_rings(bp, &hwr);
13767 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13768 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13769 return true;
13770
13771 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13772 hwr.vnic = 1;
13773 hwr.rss_ctx = 0;
13774 bnxt_hwrm_reserve_rings(bp, &hwr);
13775 return false;
13776 }
13777
13778 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13779 netdev_features_t features)
13780 {
13781 struct bnxt *bp = netdev_priv(dev);
13782 netdev_features_t vlan_features;
13783
13784 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13785 features &= ~NETIF_F_NTUPLE;
13786
13787 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13788 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13789
13790 if (!(features & NETIF_F_GRO))
13791 features &= ~NETIF_F_GRO_HW;
13792
13793 if (features & NETIF_F_GRO_HW)
13794 features &= ~NETIF_F_LRO;
13795
13796 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13797 * turned on or off together.
13798 */
13799 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13800 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13801 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13802 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13803 else if (vlan_features)
13804 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13805 }
13806 #ifdef CONFIG_BNXT_SRIOV
13807 if (BNXT_VF(bp) && bp->vf.vlan)
13808 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13809 #endif
13810 return features;
13811 }
13812
13813 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13814 bool link_re_init, u32 flags, bool update_tpa)
13815 {
13816 bnxt_close_nic(bp, irq_re_init, link_re_init);
13817 bp->flags = flags;
13818 if (update_tpa)
13819 bnxt_set_ring_params(bp);
13820 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13821 }
13822
13823 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13824 {
13825 bool update_tpa = false, update_ntuple = false;
13826 struct bnxt *bp = netdev_priv(dev);
13827 u32 flags = bp->flags;
13828 u32 changes;
13829 int rc = 0;
13830 bool re_init = false;
13831
13832 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13833 if (features & NETIF_F_GRO_HW)
13834 flags |= BNXT_FLAG_GRO;
13835 else if (features & NETIF_F_LRO)
13836 flags |= BNXT_FLAG_LRO;
13837
13838 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13839 flags &= ~BNXT_FLAG_TPA;
13840
13841 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13842 flags |= BNXT_FLAG_STRIP_VLAN;
13843
13844 if (features & NETIF_F_NTUPLE)
13845 flags |= BNXT_FLAG_RFS;
13846 else
13847 bnxt_clear_usr_fltrs(bp, true);
13848
13849 changes = flags ^ bp->flags;
13850 if (changes & BNXT_FLAG_TPA) {
13851 update_tpa = true;
13852 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13853 (flags & BNXT_FLAG_TPA) == 0 ||
13854 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13855 re_init = true;
13856 }
13857
13858 if (changes & ~BNXT_FLAG_TPA)
13859 re_init = true;
13860
13861 if (changes & BNXT_FLAG_RFS)
13862 update_ntuple = true;
13863
13864 if (flags != bp->flags) {
13865 u32 old_flags = bp->flags;
13866
13867 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13868 bp->flags = flags;
13869 if (update_tpa)
13870 bnxt_set_ring_params(bp);
13871 return rc;
13872 }
13873
13874 if (update_ntuple)
13875 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13876
13877 if (re_init)
13878 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13879
13880 if (update_tpa) {
13881 bp->flags = flags;
13882 rc = bnxt_set_tpa(bp,
13883 (flags & BNXT_FLAG_TPA) ?
13884 true : false);
13885 if (rc)
13886 bp->flags = old_flags;
13887 }
13888 }
13889 return rc;
13890 }
13891
13892 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13893 u8 **nextp)
13894 {
13895 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13896 int hdr_count = 0;
13897 u8 *nexthdr;
13898 int start;
13899
13900 /* Check that there are at most 2 IPv6 extension headers, no
13901 * fragment header, and each is <= 64 bytes.
13902 */
13903 start = nw_off + sizeof(*ip6h);
13904 nexthdr = &ip6h->nexthdr;
13905 while (ipv6_ext_hdr(*nexthdr)) {
13906 struct ipv6_opt_hdr *hp;
13907 int hdrlen;
13908
13909 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13910 *nexthdr == NEXTHDR_FRAGMENT)
13911 return false;
13912 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13913 skb_headlen(skb), NULL);
13914 if (!hp)
13915 return false;
13916 if (*nexthdr == NEXTHDR_AUTH)
13917 hdrlen = ipv6_authlen(hp);
13918 else
13919 hdrlen = ipv6_optlen(hp);
13920
13921 if (hdrlen > 64)
13922 return false;
13923
13924 hdr_count++;
13925 nexthdr = &hp->nexthdr;
13926 start += hdrlen;
13927 }
13928 if (nextp) {
13929 /* Caller will check inner protocol */
13930 if (skb->encapsulation) {
13931 *nextp = nexthdr;
13932 return true;
13933 }
13934 *nextp = NULL;
13935 }
13936 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13937 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13938 }
13939
13940 /* For UDP, we can only handle one VXLAN port, one VXLAN-GPE port and one Geneve port. */
13941 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13942 {
13943 struct udphdr *uh = udp_hdr(skb);
13944 __be16 udp_port = uh->dest;
13945
13946 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13947 udp_port != bp->vxlan_gpe_port)
13948 return false;
13949 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13950 struct ethhdr *eh = inner_eth_hdr(skb);
13951
13952 switch (eh->h_proto) {
13953 case htons(ETH_P_IP):
13954 return true;
13955 case htons(ETH_P_IPV6):
13956 return bnxt_exthdr_check(bp, skb,
13957 skb_inner_network_offset(skb),
13958 NULL);
13959 }
13960 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13961 return true;
13962 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13963 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13964 NULL);
13965 }
13966 return false;
13967 }
13968
13969 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13970 {
13971 switch (l4_proto) {
13972 case IPPROTO_UDP:
13973 return bnxt_udp_tunl_check(bp, skb);
13974 case IPPROTO_IPIP:
13975 return true;
13976 case IPPROTO_GRE: {
13977 switch (skb->inner_protocol) {
13978 default:
13979 return false;
13980 case htons(ETH_P_IP):
13981 return true;
13982 case htons(ETH_P_IPV6):
13983 fallthrough;
13984 }
13985 }
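/* GRE with an inner IPv6 packet falls through to the IPPROTO_IPV6 case
 * below so that the inner extension headers are validated.
 */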
13986 case IPPROTO_IPV6:
13987 /* Check ext headers of inner ipv6 */
13988 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13989 NULL);
13990 }
13991 return false;
13992 }
13993
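/* Per-skb offload check: if the (possibly tunneled) headers cannot be
 * parsed for hardware offload, strip the checksum and GSO features so the
 * stack falls back to software for this packet.
 */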
13994 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13995 struct net_device *dev,
13996 netdev_features_t features)
13997 {
13998 struct bnxt *bp = netdev_priv(dev);
13999 u8 *l4_proto;
14000
14001 features = vlan_features_check(skb, features);
14002 switch (vlan_get_protocol(skb)) {
14003 case htons(ETH_P_IP):
14004 if (!skb->encapsulation)
14005 return features;
14006 l4_proto = &ip_hdr(skb)->protocol;
14007 if (bnxt_tunl_check(bp, skb, *l4_proto))
14008 return features;
14009 break;
14010 case htons(ETH_P_IPV6):
14011 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14012 &l4_proto))
14013 break;
14014 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14015 return features;
14016 break;
14017 }
14018 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14019 }
14020
14021 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14022 u32 *reg_buf)
14023 {
14024 struct hwrm_dbg_read_direct_output *resp;
14025 struct hwrm_dbg_read_direct_input *req;
14026 __le32 *dbg_reg_buf;
14027 dma_addr_t mapping;
14028 int rc, i;
14029
14030 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14031 if (rc)
14032 return rc;
14033
14034 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14035 &mapping);
14036 if (!dbg_reg_buf) {
14037 rc = -ENOMEM;
14038 goto dbg_rd_reg_exit;
14039 }
14040
14041 req->host_dest_addr = cpu_to_le64(mapping);
14042
14043 resp = hwrm_req_hold(bp, req);
14044 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14045 req->read_len32 = cpu_to_le32(num_words);
14046
14047 rc = hwrm_req_send(bp, req);
14048 if (rc || resp->error_code) {
14049 rc = -EIO;
14050 goto dbg_rd_reg_exit;
14051 }
14052 for (i = 0; i < num_words; i++)
14053 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14054
14055 dbg_rd_reg_exit:
14056 hwrm_req_drop(bp, req);
14057 return rc;
14058 }
14059
14060 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14061 u32 ring_id, u32 *prod, u32 *cons)
14062 {
14063 struct hwrm_dbg_ring_info_get_output *resp;
14064 struct hwrm_dbg_ring_info_get_input *req;
14065 int rc;
14066
14067 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14068 if (rc)
14069 return rc;
14070
14071 req->ring_type = ring_type;
14072 req->fw_ring_id = cpu_to_le32(ring_id);
14073 resp = hwrm_req_hold(bp, req);
14074 rc = hwrm_req_send(bp, req);
14075 if (!rc) {
14076 *prod = le32_to_cpu(resp->producer_index);
14077 *cons = le32_to_cpu(resp->consumer_index);
14078 }
14079 hwrm_req_drop(bp, req);
14080 return rc;
14081 }
14082
14083 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14084 {
14085 struct bnxt_tx_ring_info *txr;
14086 int i = bnapi->index, j;
14087
14088 bnxt_for_each_napi_tx(j, bnapi, txr)
14089 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14090 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14091 txr->tx_cons);
14092 }
14093
14094 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14095 {
14096 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14097 int i = bnapi->index;
14098
14099 if (!rxr)
14100 return;
14101
14102 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14103 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14104 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14105 rxr->rx_sw_agg_prod);
14106 }
14107
14108 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14109 {
14110 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14111 int i = bnapi->index, j;
14112
14113 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14114 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14115 for (j = 0; j < cpr->cp_ring_count; j++) {
14116 cpr2 = &cpr->cp_ring_arr[j];
14117 if (!cpr2->bnapi)
14118 continue;
14119 netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14120 i, j, cpr2->cp_ring_struct.fw_ring_id,
14121 cpr2->cp_raw_cons);
14122 }
14123 }
14124
14125 static void bnxt_dbg_dump_states(struct bnxt *bp)
14126 {
14127 int i;
14128 struct bnxt_napi *bnapi;
14129
14130 for (i = 0; i < bp->cp_nr_rings; i++) {
14131 bnapi = bp->bnapi[i];
14132 if (netif_msg_drv(bp)) {
14133 bnxt_dump_tx_sw_state(bnapi);
14134 bnxt_dump_rx_sw_state(bnapi);
14135 bnxt_dump_cp_sw_state(bnapi);
14136 }
14137 }
14138 }
14139
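/* Ask firmware to reset a single RX ring group.  Used by the RX ring reset
 * path below to recover from per-ring errors without a full reopen.
 */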
14140 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14141 {
14142 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14143 struct hwrm_ring_reset_input *req;
14144 struct bnxt_napi *bnapi = rxr->bnapi;
14145 struct bnxt_cp_ring_info *cpr;
14146 u16 cp_ring_id;
14147 int rc;
14148
14149 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14150 if (rc)
14151 return rc;
14152
14153 cpr = &bnapi->cp_ring;
14154 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14155 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14156 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14157 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14158 return hwrm_req_send_silent(bp, req);
14159 }
14160
14161 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14162 {
14163 if (!silent)
14164 bnxt_dbg_dump_states(bp);
14165 if (netif_running(bp->dev)) {
14166 bnxt_close_nic(bp, !silent, false);
14167 bnxt_open_nic(bp, !silent, false);
14168 }
14169 }
14170
14171 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14172 {
14173 struct bnxt *bp = netdev_priv(dev);
14174
14175 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14176 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14177 }
14178
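/* Periodic health poll driven from bnxt_timer().  Once the polling interval
 * (tmr_counter) expires, compare the firmware heartbeat and reset-counter
 * registers against the last known values and queue a firmware exception
 * event if the heartbeat has stopped or an unexpected reset is detected.
 */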
14179 static void bnxt_fw_health_check(struct bnxt *bp)
14180 {
14181 struct bnxt_fw_health *fw_health = bp->fw_health;
14182 struct pci_dev *pdev = bp->pdev;
14183 u32 val;
14184
14185 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14186 return;
14187
14188 /* Make sure it is enabled before checking the tmr_counter. */
14189 smp_rmb();
14190 if (fw_health->tmr_counter) {
14191 fw_health->tmr_counter--;
14192 return;
14193 }
14194
14195 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14196 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14197 fw_health->arrests++;
14198 goto fw_reset;
14199 }
14200
14201 fw_health->last_fw_heartbeat = val;
14202
14203 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14204 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14205 fw_health->discoveries++;
14206 goto fw_reset;
14207 }
14208
14209 fw_health->tmr_counter = fw_health->tmr_multiplier;
14210 return;
14211
14212 fw_reset:
14213 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14214 }
14215
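/* Per-device periodic timer.  Queues slow path work items (stats collection,
 * PHY retries, NTP filter aging, firmware health checks, etc.) and re-arms
 * itself every bp->current_interval jiffies while the device is open.
 */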
14216 static void bnxt_timer(struct timer_list *t)
14217 {
14218 struct bnxt *bp = timer_container_of(bp, t, timer);
14219 struct net_device *dev = bp->dev;
14220
14221 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14222 return;
14223
14224 if (atomic_read(&bp->intr_sem) != 0)
14225 goto bnxt_restart_timer;
14226
14227 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14228 bnxt_fw_health_check(bp);
14229
14230 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14231 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14232
14233 if (bnxt_tc_flower_enabled(bp))
14234 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14235
14236 #ifdef CONFIG_RFS_ACCEL
14237 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14238 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14239 #endif /*CONFIG_RFS_ACCEL*/
14240
14241 if (bp->link_info.phy_retry) {
14242 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14243 bp->link_info.phy_retry = false;
14244 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14245 } else {
14246 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14247 }
14248 }
14249
14250 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14251 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14252
14253 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14254 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14255
14256 bnxt_restart_timer:
14257 mod_timer(&bp->timer, jiffies + bp->current_interval);
14258 }
14259
14260 static void bnxt_lock_sp(struct bnxt *bp)
14261 {
14262 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14263 * set. If the device is being closed, bnxt_close() may be holding
14264 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14265 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14266 * instance lock.
14267 */
14268 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14269 netdev_lock(bp->dev);
14270 }
14271
14272 static void bnxt_unlock_sp(struct bnxt *bp)
14273 {
14274 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14275 netdev_unlock(bp->dev);
14276 }
14277
14278 /* Only called from bnxt_sp_task() */
14279 static void bnxt_reset(struct bnxt *bp, bool silent)
14280 {
14281 bnxt_lock_sp(bp);
14282 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14283 bnxt_reset_task(bp, silent);
14284 bnxt_unlock_sp(bp);
14285 }
14286
14287 /* Only called from bnxt_sp_task() */
14288 static void bnxt_rx_ring_reset(struct bnxt *bp)
14289 {
14290 int i;
14291
14292 bnxt_lock_sp(bp);
14293 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14294 bnxt_unlock_sp(bp);
14295 return;
14296 }
14297 /* Disable and flush TPA before resetting the RX ring */
14298 if (bp->flags & BNXT_FLAG_TPA)
14299 bnxt_set_tpa(bp, false);
14300 for (i = 0; i < bp->rx_nr_rings; i++) {
14301 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14302 struct bnxt_cp_ring_info *cpr;
14303 int rc;
14304
14305 if (!rxr->bnapi->in_reset)
14306 continue;
14307
14308 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14309 if (rc) {
14310 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14311 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14312 else
14313 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14314 rc);
14315 bnxt_reset_task(bp, true);
14316 break;
14317 }
14318 bnxt_free_one_rx_ring_skbs(bp, rxr);
14319 rxr->rx_prod = 0;
14320 rxr->rx_agg_prod = 0;
14321 rxr->rx_sw_agg_prod = 0;
14322 rxr->rx_next_cons = 0;
14323 rxr->bnapi->in_reset = false;
14324 bnxt_alloc_one_rx_ring(bp, i);
14325 cpr = &rxr->bnapi->cp_ring;
14326 cpr->sw_stats->rx.rx_resets++;
14327 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14328 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14329 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14330 }
14331 if (bp->flags & BNXT_FLAG_TPA)
14332 bnxt_set_tpa(bp, true);
14333 bnxt_unlock_sp(bp);
14334 }
14335
14336 static void bnxt_fw_fatal_close(struct bnxt *bp)
14337 {
14338 bnxt_tx_disable(bp);
14339 bnxt_disable_napi(bp);
14340 bnxt_disable_int_sync(bp);
14341 bnxt_free_irq(bp);
14342 bnxt_clear_int_mode(bp);
14343 pci_disable_device(bp->pdev);
14344 }
14345
14346 static void bnxt_fw_reset_close(struct bnxt *bp)
14347 {
14348 /* When firmware is in fatal state, quiesce device and disable
14349 * bus master to prevent any potential bad DMAs before freeing
14350 * kernel memory.
14351 */
14352 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14353 u16 val = 0;
14354
14355 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14356 if (val == 0xffff)
14357 bp->fw_reset_min_dsecs = 0;
14358 bnxt_fw_fatal_close(bp);
14359 }
14360 __bnxt_close_nic(bp, true, false);
14361 bnxt_vf_reps_free(bp);
14362 bnxt_clear_int_mode(bp);
14363 bnxt_hwrm_func_drv_unrgtr(bp);
14364 if (pci_is_enabled(bp->pdev))
14365 pci_disable_device(bp->pdev);
14366 bnxt_free_ctx_mem(bp, false);
14367 }
14368
14369 static bool is_bnxt_fw_ok(struct bnxt *bp)
14370 {
14371 struct bnxt_fw_health *fw_health = bp->fw_health;
14372 bool no_heartbeat = false, has_reset = false;
14373 u32 val;
14374
14375 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14376 if (val == fw_health->last_fw_heartbeat)
14377 no_heartbeat = true;
14378
14379 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14380 if (val != fw_health->last_fw_reset_cnt)
14381 has_reset = true;
14382
14383 if (!no_heartbeat && has_reset)
14384 return true;
14385
14386 return false;
14387 }
14388
14389 /* netdev instance lock is acquired before calling this function */
14390 static void bnxt_force_fw_reset(struct bnxt *bp)
14391 {
14392 struct bnxt_fw_health *fw_health = bp->fw_health;
14393 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14394 u32 wait_dsecs;
14395
14396 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14397 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14398 return;
14399
14400 /* we have to serialize with bnxt_refclk_read() */
14401 if (ptp) {
14402 unsigned long flags;
14403
14404 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14405 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14406 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14407 } else {
14408 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14409 }
14410 bnxt_fw_reset_close(bp);
14411 wait_dsecs = fw_health->master_func_wait_dsecs;
14412 if (fw_health->primary) {
14413 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14414 wait_dsecs = 0;
14415 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14416 } else {
14417 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14418 wait_dsecs = fw_health->normal_func_wait_dsecs;
14419 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14420 }
14421
14422 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14423 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14424 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14425 }
14426
14427 void bnxt_fw_exception(struct bnxt *bp)
14428 {
14429 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14430 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14431 bnxt_ulp_stop(bp);
14432 bnxt_lock_sp(bp);
14433 bnxt_force_fw_reset(bp);
14434 bnxt_unlock_sp(bp);
14435 }
14436
14437 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14438 * < 0 on error.
14439 */
14440 static int bnxt_get_registered_vfs(struct bnxt *bp)
14441 {
14442 #ifdef CONFIG_BNXT_SRIOV
14443 int rc;
14444
14445 if (!BNXT_PF(bp))
14446 return 0;
14447
14448 rc = bnxt_hwrm_func_qcfg(bp);
14449 if (rc) {
14450 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14451 return rc;
14452 }
14453 if (bp->pf.registered_vfs)
14454 return bp->pf.registered_vfs;
14455 if (bp->sriov_cfg)
14456 return 1;
14457 #endif
14458 return 0;
14459 }
14460
14461 void bnxt_fw_reset(struct bnxt *bp)
14462 {
14463 bnxt_ulp_stop(bp);
14464 bnxt_lock_sp(bp);
14465 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14466 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14467 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14468 int n = 0, tmo;
14469
14470 /* we have to serialize with bnxt_refclk_read() */
14471 if (ptp) {
14472 unsigned long flags;
14473
14474 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14475 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14476 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14477 } else {
14478 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14479 }
14480 if (bp->pf.active_vfs &&
14481 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14482 n = bnxt_get_registered_vfs(bp);
14483 if (n < 0) {
14484 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14485 n);
14486 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14487 netif_close(bp->dev);
14488 goto fw_reset_exit;
14489 } else if (n > 0) {
14490 u16 vf_tmo_dsecs = n * 10;
14491
14492 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14493 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14494 bp->fw_reset_state =
14495 BNXT_FW_RESET_STATE_POLL_VF;
14496 bnxt_queue_fw_reset_work(bp, HZ / 10);
14497 goto fw_reset_exit;
14498 }
14499 bnxt_fw_reset_close(bp);
14500 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14501 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14502 tmo = HZ / 10;
14503 } else {
14504 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14505 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14506 }
14507 bnxt_queue_fw_reset_work(bp, tmo);
14508 }
14509 fw_reset_exit:
14510 bnxt_unlock_sp(bp);
14511 }
14512
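/* On P5+ chips, scan the completion rings for entries that have work pending
 * but whose consumer index has not advanced since the last timer tick.  Such
 * a ring has likely missed an interrupt; query its producer/consumer state
 * from firmware for debugging and bump the missed_irqs counter.
 */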
14513 static void bnxt_chk_missed_irq(struct bnxt *bp)
14514 {
14515 int i;
14516
14517 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14518 return;
14519
14520 for (i = 0; i < bp->cp_nr_rings; i++) {
14521 struct bnxt_napi *bnapi = bp->bnapi[i];
14522 struct bnxt_cp_ring_info *cpr;
14523 u32 fw_ring_id;
14524 int j;
14525
14526 if (!bnapi)
14527 continue;
14528
14529 cpr = &bnapi->cp_ring;
14530 for (j = 0; j < cpr->cp_ring_count; j++) {
14531 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14532 u32 val[2];
14533
14534 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14535 continue;
14536
14537 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14538 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14539 continue;
14540 }
14541 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14542 bnxt_dbg_hwrm_ring_info_get(bp,
14543 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14544 fw_ring_id, &val[0], &val[1]);
14545 cpr->sw_stats->cmn.missed_irqs++;
14546 }
14547 }
14548 }
14549
14550 static void bnxt_cfg_ntp_filters(struct bnxt *);
14551
14552 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14553 {
14554 struct bnxt_link_info *link_info = &bp->link_info;
14555
14556 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14557 link_info->autoneg = BNXT_AUTONEG_SPEED;
14558 if (bp->hwrm_spec_code >= 0x10201) {
14559 if (link_info->auto_pause_setting &
14560 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14561 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14562 } else {
14563 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14564 }
14565 bnxt_set_auto_speed(link_info);
14566 } else {
14567 bnxt_set_force_speed(link_info);
14568 link_info->req_duplex = link_info->duplex_setting;
14569 }
14570 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14571 link_info->req_flow_ctrl =
14572 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14573 else
14574 link_info->req_flow_ctrl = link_info->force_pause_setting;
14575 }
14576
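/* Answer a firmware echo request by sending back the data words that arrived
 * with the async notification, so firmware can verify the driver is alive
 * and processing events.
 */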
14577 static void bnxt_fw_echo_reply(struct bnxt *bp)
14578 {
14579 struct bnxt_fw_health *fw_health = bp->fw_health;
14580 struct hwrm_func_echo_response_input *req;
14581 int rc;
14582
14583 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14584 if (rc)
14585 return;
14586 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14587 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14588 hwrm_req_send(bp, req);
14589 }
14590
14591 static void bnxt_ulp_restart(struct bnxt *bp)
14592 {
14593 bnxt_ulp_stop(bp);
14594 bnxt_ulp_start(bp, 0);
14595 }
14596
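/* Slow path task handling all deferred work queued via bnxt_queue_sp_work().
 * Each BNXT_*_SP_EVENT bit is tested and cleared in turn; the reset handlers
 * must run last because they release BNXT_STATE_IN_SP_TASK themselves.
 */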
14597 static void bnxt_sp_task(struct work_struct *work)
14598 {
14599 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14600
14601 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14602 smp_mb__after_atomic();
14603 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14604 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14605 return;
14606 }
14607
14608 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14609 bnxt_ulp_restart(bp);
14610 bnxt_reenable_sriov(bp);
14611 }
14612
14613 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14614 bnxt_cfg_rx_mode(bp);
14615
14616 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14617 bnxt_cfg_ntp_filters(bp);
14618 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14619 bnxt_hwrm_exec_fwd_req(bp);
14620 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14621 netdev_info(bp->dev, "Receive PF driver unload event!\n");
14622 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14623 bnxt_hwrm_port_qstats(bp, 0);
14624 bnxt_hwrm_port_qstats_ext(bp, 0);
14625 bnxt_accumulate_all_stats(bp);
14626 }
14627
14628 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14629 int rc;
14630
14631 mutex_lock(&bp->link_lock);
14632 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14633 &bp->sp_event))
14634 bnxt_hwrm_phy_qcaps(bp);
14635
14636 rc = bnxt_update_link(bp, true);
14637 if (rc)
14638 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14639 rc);
14640
14641 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14642 &bp->sp_event))
14643 bnxt_init_ethtool_link_settings(bp);
14644 mutex_unlock(&bp->link_lock);
14645 }
14646 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14647 int rc;
14648
14649 mutex_lock(&bp->link_lock);
14650 rc = bnxt_update_phy_setting(bp);
14651 mutex_unlock(&bp->link_lock);
14652 if (rc) {
14653 netdev_warn(bp->dev, "update phy settings retry failed\n");
14654 } else {
14655 bp->link_info.phy_retry = false;
14656 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14657 }
14658 }
14659 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14660 mutex_lock(&bp->link_lock);
14661 bnxt_get_port_module_status(bp);
14662 mutex_unlock(&bp->link_lock);
14663 }
14664
14665 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14666 bnxt_tc_flow_stats_work(bp);
14667
14668 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14669 bnxt_chk_missed_irq(bp);
14670
14671 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14672 bnxt_fw_echo_reply(bp);
14673
14674 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14675 bnxt_hwmon_notify_event(bp);
14676
14677 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14678 * must be the last functions to be called before exiting.
14679 */
14680 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14681 bnxt_reset(bp, false);
14682
14683 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14684 bnxt_reset(bp, true);
14685
14686 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14687 bnxt_rx_ring_reset(bp);
14688
14689 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14690 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14691 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14692 bnxt_devlink_health_fw_report(bp);
14693 else
14694 bnxt_fw_reset(bp);
14695 }
14696
14697 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14698 if (!is_bnxt_fw_ok(bp))
14699 bnxt_devlink_health_fw_report(bp);
14700 }
14701
14702 smp_mb__before_atomic();
14703 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14704 }
14705
14706 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14707 int *max_cp);
14708
14709 /* Under netdev instance lock */
14710 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14711 int tx_xdp)
14712 {
14713 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14714 struct bnxt_hw_rings hwr = {0};
14715 int rx_rings = rx;
14716 int rc;
14717
14718 if (tcs)
14719 tx_sets = tcs;
14720
14721 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14722
14723 if (max_rx < rx_rings)
14724 return -ENOMEM;
14725
14726 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14727 rx_rings <<= 1;
14728
14729 hwr.rx = rx_rings;
14730 hwr.tx = tx * tx_sets + tx_xdp;
14731 if (max_tx < hwr.tx)
14732 return -ENOMEM;
14733
14734 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14735
14736 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14737 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14738 if (max_cp < hwr.cp)
14739 return -ENOMEM;
14740 hwr.stat = hwr.cp;
14741 if (BNXT_NEW_RM(bp)) {
14742 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14743 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14744 hwr.grp = rx;
14745 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14746 }
14747 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14748 hwr.cp_p5 = hwr.tx + rx;
14749 rc = bnxt_hwrm_check_rings(bp, &hwr);
14750 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14751 if (!bnxt_ulp_registered(bp->edev)) {
14752 hwr.cp += bnxt_get_ulp_msix_num(bp);
14753 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14754 }
14755 if (hwr.cp > bp->total_irqs) {
14756 int total_msix = bnxt_change_msix(bp, hwr.cp);
14757
14758 if (total_msix < hwr.cp) {
14759 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14760 hwr.cp, total_msix);
14761 rc = -ENOSPC;
14762 }
14763 }
14764 }
14765 return rc;
14766 }
14767
14768 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14769 {
14770 if (bp->bar2) {
14771 pci_iounmap(pdev, bp->bar2);
14772 bp->bar2 = NULL;
14773 }
14774
14775 if (bp->bar1) {
14776 pci_iounmap(pdev, bp->bar1);
14777 bp->bar1 = NULL;
14778 }
14779
14780 if (bp->bar0) {
14781 pci_iounmap(pdev, bp->bar0);
14782 bp->bar0 = NULL;
14783 }
14784 }
14785
14786 static void bnxt_cleanup_pci(struct bnxt *bp)
14787 {
14788 bnxt_unmap_bars(bp, bp->pdev);
14789 pci_release_regions(bp->pdev);
14790 if (pci_is_enabled(bp->pdev))
14791 pci_disable_device(bp->pdev);
14792 }
14793
14794 static void bnxt_init_dflt_coal(struct bnxt *bp)
14795 {
14796 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14797 struct bnxt_coal *coal;
14798 u16 flags = 0;
14799
14800 if (coal_cap->cmpl_params &
14801 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14802 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14803
14804 /* Tick values in microseconds.
14805 * 1 coal_buf x bufs_per_record = 1 completion record.
14806 */
14807 coal = &bp->rx_coal;
14808 coal->coal_ticks = 10;
14809 coal->coal_bufs = 30;
14810 coal->coal_ticks_irq = 1;
14811 coal->coal_bufs_irq = 2;
14812 coal->idle_thresh = 50;
14813 coal->bufs_per_record = 2;
14814 coal->budget = 64; /* NAPI budget */
14815 coal->flags = flags;
14816
14817 coal = &bp->tx_coal;
14818 coal->coal_ticks = 28;
14819 coal->coal_bufs = 30;
14820 coal->coal_ticks_irq = 2;
14821 coal->coal_bufs_irq = 2;
14822 coal->bufs_per_record = 1;
14823 coal->flags = flags;
14824
14825 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14826 }
14827
14828 /* FW that pre-reserves 1 VNIC per function */
14829 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14830 {
14831 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14832
14833 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14834 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14835 return true;
14836 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14837 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14838 return true;
14839 return false;
14840 }
14841
14842 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14843 {
14844 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14845 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14846 int rc;
14847
14848 bp->max_pfcwd_tmo_ms = 0;
14849 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14850 if (rc)
14851 return;
14852 resp = hwrm_req_hold(bp, req);
14853 rc = hwrm_req_send_silent(bp, req);
14854 if (!rc)
14855 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14856 hwrm_req_drop(bp, req);
14857 }
14858
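/* Phase 1 of firmware init: establish HWRM communication (retrying recovery
 * if firmware is unresponsive after an FLR), read NVM config versions, reset
 * the function and set the firmware time.
 */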
14859 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14860 {
14861 int rc;
14862
14863 bp->fw_cap = 0;
14864 rc = bnxt_hwrm_ver_get(bp);
14865 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14866 * so wait before continuing with recovery.
14867 */
14868 if (rc)
14869 msleep(100);
14870 bnxt_try_map_fw_health_reg(bp);
14871 if (rc) {
14872 rc = bnxt_try_recover_fw(bp);
14873 if (rc)
14874 return rc;
14875 rc = bnxt_hwrm_ver_get(bp);
14876 if (rc)
14877 return rc;
14878 }
14879
14880 bnxt_nvm_cfg_ver_get(bp);
14881
14882 rc = bnxt_hwrm_func_reset(bp);
14883 if (rc)
14884 return -ENODEV;
14885
14886 bnxt_hwrm_fw_set_time(bp);
14887 return 0;
14888 }
14889
14890 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14891 {
14892 int rc;
14893
14894 /* Get the MAX capabilities for this function */
14895 rc = bnxt_hwrm_func_qcaps(bp);
14896 if (rc) {
14897 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14898 rc);
14899 return -ENODEV;
14900 }
14901
14902 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14903 if (rc)
14904 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14905 rc);
14906
14907 if (bnxt_alloc_fw_health(bp)) {
14908 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14909 } else {
14910 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14911 if (rc)
14912 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14913 rc);
14914 }
14915
14916 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14917 if (rc)
14918 return -ENODEV;
14919
14920 rc = bnxt_alloc_crash_dump_mem(bp);
14921 if (rc)
14922 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14923 rc);
14924 if (!rc) {
14925 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14926 if (rc) {
14927 bnxt_free_crash_dump_mem(bp);
14928 netdev_warn(bp->dev,
14929 "hwrm crash dump mem failure rc: %d\n", rc);
14930 }
14931 }
14932
14933 if (bnxt_fw_pre_resv_vnics(bp))
14934 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14935
14936 bnxt_hwrm_pfcwd_qcaps(bp);
14937 bnxt_hwrm_func_qcfg(bp);
14938 bnxt_hwrm_vnic_qcaps(bp);
14939 bnxt_hwrm_port_led_qcaps(bp);
14940 bnxt_ethtool_init(bp);
14941 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14942 __bnxt_hwrm_ptp_qcfg(bp);
14943 bnxt_dcb_init(bp);
14944 bnxt_hwmon_init(bp);
14945 return 0;
14946 }
14947
14948 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14949 {
14950 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14951 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14952 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14953 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14954 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14955 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14956 bp->rss_hash_delta = bp->rss_hash_cfg;
14957 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14958 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14959 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14960 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14961 }
14962 }
14963
14964 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14965 {
14966 struct net_device *dev = bp->dev;
14967
14968 dev->hw_features &= ~NETIF_F_NTUPLE;
14969 dev->features &= ~NETIF_F_NTUPLE;
14970 bp->flags &= ~BNXT_FLAG_RFS;
14971 if (bnxt_rfs_supported(bp)) {
14972 dev->hw_features |= NETIF_F_NTUPLE;
14973 if (bnxt_rfs_capable(bp, false)) {
14974 bp->flags |= BNXT_FLAG_RFS;
14975 dev->features |= NETIF_F_NTUPLE;
14976 }
14977 }
14978 }
14979
14980 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14981 {
14982 struct pci_dev *pdev = bp->pdev;
14983
14984 bnxt_set_dflt_rss_hash_type(bp);
14985 bnxt_set_dflt_rfs(bp);
14986
14987 bnxt_get_wol_settings(bp);
14988 if (bp->flags & BNXT_FLAG_WOL_CAP)
14989 device_set_wakeup_enable(&pdev->dev, bp->wol);
14990 else
14991 device_set_wakeup_capable(&pdev->dev, false);
14992
14993 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14994 bnxt_hwrm_coal_params_qcaps(bp);
14995 }
14996
14997 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14998
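/* Run all three firmware init phases, probe the PHY and validate the MAC
 * address with firmware; typically invoked when the device is reinitialized
 * after firmware has been reset or reloaded.
 */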
14999 int bnxt_fw_init_one(struct bnxt *bp)
15000 {
15001 int rc;
15002
15003 rc = bnxt_fw_init_one_p1(bp);
15004 if (rc) {
15005 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15006 return rc;
15007 }
15008 rc = bnxt_fw_init_one_p2(bp);
15009 if (rc) {
15010 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15011 return rc;
15012 }
15013 rc = bnxt_probe_phy(bp, false);
15014 if (rc)
15015 return rc;
15016 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15017 if (rc)
15018 return rc;
15019
15020 bnxt_fw_init_one_p3(bp);
15021 return 0;
15022 }
15023
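/* Execute one step of the firmware-supplied reset sequence: write the value
 * for @reg_idx to the register described by its type (PCI config space, GRC
 * window, BAR0 or BAR1), then apply the optional per-step delay after
 * flushing the write with a config read.
 */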
15024 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15025 {
15026 struct bnxt_fw_health *fw_health = bp->fw_health;
15027 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15028 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15029 u32 reg_type, reg_off, delay_msecs;
15030
15031 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15032 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15033 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15034 switch (reg_type) {
15035 case BNXT_FW_HEALTH_REG_TYPE_CFG:
15036 pci_write_config_dword(bp->pdev, reg_off, val);
15037 break;
15038 case BNXT_FW_HEALTH_REG_TYPE_GRC:
15039 writel(reg_off & BNXT_GRC_BASE_MASK,
15040 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15041 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15042 fallthrough;
15043 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15044 writel(val, bp->bar0 + reg_off);
15045 break;
15046 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15047 writel(val, bp->bar1 + reg_off);
15048 break;
15049 }
15050 if (delay_msecs) {
15051 pci_read_config_dword(bp->pdev, 0, &val);
15052 msleep(delay_msecs);
15053 }
15054 }
15055
15056 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15057 {
15058 struct hwrm_func_qcfg_output *resp;
15059 struct hwrm_func_qcfg_input *req;
15060 bool result = true; /* firmware will enforce if unknown */
15061
15062 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15063 return result;
15064
15065 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15066 return result;
15067
15068 req->fid = cpu_to_le16(0xffff);
15069 resp = hwrm_req_hold(bp, req);
15070 if (!hwrm_req_send(bp, req))
15071 result = !!(le16_to_cpu(resp->flags) &
15072 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15073 hwrm_req_drop(bp, req);
15074 return result;
15075 }
15076
15077 static void bnxt_reset_all(struct bnxt *bp)
15078 {
15079 struct bnxt_fw_health *fw_health = bp->fw_health;
15080 int i, rc;
15081
15082 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15083 bnxt_fw_reset_via_optee(bp);
15084 bp->fw_reset_timestamp = jiffies;
15085 return;
15086 }
15087
15088 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15089 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15090 bnxt_fw_reset_writel(bp, i);
15091 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15092 struct hwrm_fw_reset_input *req;
15093
15094 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15095 if (!rc) {
15096 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15097 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15098 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15099 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15100 rc = hwrm_req_send(bp, req);
15101 }
15102 if (rc != -ENODEV)
15103 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15104 }
15105 bp->fw_reset_timestamp = jiffies;
15106 }
15107
15108 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15109 {
15110 return time_after(jiffies, bp->fw_reset_timestamp +
15111 (bp->fw_reset_max_dsecs * HZ / 10));
15112 }
15113
15114 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15115 {
15116 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15117 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15118 bnxt_dl_health_fw_status_update(bp, false);
15119 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15120 netif_close(bp->dev);
15121 }
15122
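/* Worker implementing the firmware reset state machine.  It advances through
 * the BNXT_FW_RESET_STATE_* states one step at a time, re-queueing itself
 * with an appropriate delay between steps, and aborts the reset if any step
 * fails or times out.
 */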
15123 static void bnxt_fw_reset_task(struct work_struct *work)
15124 {
15125 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15126 int rc = 0;
15127
15128 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15129 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15130 return;
15131 }
15132
15133 switch (bp->fw_reset_state) {
15134 case BNXT_FW_RESET_STATE_POLL_VF: {
15135 int n = bnxt_get_registered_vfs(bp);
15136 int tmo;
15137
15138 if (n < 0) {
15139 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15140 n, jiffies_to_msecs(jiffies -
15141 bp->fw_reset_timestamp));
15142 goto fw_reset_abort;
15143 } else if (n > 0) {
15144 if (bnxt_fw_reset_timeout(bp)) {
15145 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15146 bp->fw_reset_state = 0;
15147 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15148 n);
15149 goto ulp_start;
15150 }
15151 bnxt_queue_fw_reset_work(bp, HZ / 10);
15152 return;
15153 }
15154 bp->fw_reset_timestamp = jiffies;
15155 netdev_lock(bp->dev);
15156 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15157 bnxt_fw_reset_abort(bp, rc);
15158 netdev_unlock(bp->dev);
15159 goto ulp_start;
15160 }
15161 bnxt_fw_reset_close(bp);
15162 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15163 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15164 tmo = HZ / 10;
15165 } else {
15166 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15167 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15168 }
15169 netdev_unlock(bp->dev);
15170 bnxt_queue_fw_reset_work(bp, tmo);
15171 return;
15172 }
15173 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15174 u32 val;
15175
15176 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15177 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15178 !bnxt_fw_reset_timeout(bp)) {
15179 bnxt_queue_fw_reset_work(bp, HZ / 5);
15180 return;
15181 }
15182
15183 if (!bp->fw_health->primary) {
15184 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15185
15186 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15187 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15188 return;
15189 }
15190 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15191 }
15192 fallthrough;
15193 case BNXT_FW_RESET_STATE_RESET_FW:
15194 bnxt_reset_all(bp);
15195 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15196 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15197 return;
15198 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15199 bnxt_inv_fw_health_reg(bp);
15200 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15201 !bp->fw_reset_min_dsecs) {
15202 u16 val;
15203
15204 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15205 if (val == 0xffff) {
15206 if (bnxt_fw_reset_timeout(bp)) {
15207 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15208 rc = -ETIMEDOUT;
15209 goto fw_reset_abort;
15210 }
15211 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15212 return;
15213 }
15214 }
15215 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15216 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15217 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15218 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15219 bnxt_dl_remote_reload(bp);
15220 if (pci_enable_device(bp->pdev)) {
15221 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15222 rc = -ENODEV;
15223 goto fw_reset_abort;
15224 }
15225 pci_set_master(bp->pdev);
15226 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15227 fallthrough;
15228 case BNXT_FW_RESET_STATE_POLL_FW:
15229 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15230 rc = bnxt_hwrm_poll(bp);
15231 if (rc) {
15232 if (bnxt_fw_reset_timeout(bp)) {
15233 netdev_err(bp->dev, "Firmware reset aborted\n");
15234 goto fw_reset_abort_status;
15235 }
15236 bnxt_queue_fw_reset_work(bp, HZ / 5);
15237 return;
15238 }
15239 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15240 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15241 fallthrough;
15242 case BNXT_FW_RESET_STATE_OPENING:
15243 while (!netdev_trylock(bp->dev)) {
15244 bnxt_queue_fw_reset_work(bp, HZ / 10);
15245 return;
15246 }
15247 rc = bnxt_open(bp->dev);
15248 if (rc) {
15249 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15250 bnxt_fw_reset_abort(bp, rc);
15251 netdev_unlock(bp->dev);
15252 goto ulp_start;
15253 }
15254
15255 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15256 bp->fw_health->enabled) {
15257 bp->fw_health->last_fw_reset_cnt =
15258 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15259 }
15260 bp->fw_reset_state = 0;
15261 /* Make sure fw_reset_state is 0 before clearing the flag */
15262 smp_mb__before_atomic();
15263 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15264 bnxt_ptp_reapply_pps(bp);
15265 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15266 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15267 bnxt_dl_health_fw_recovery_done(bp);
15268 bnxt_dl_health_fw_status_update(bp, true);
15269 }
15270 netdev_unlock(bp->dev);
15271 bnxt_ulp_start(bp, 0);
15272 bnxt_reenable_sriov(bp);
15273 netdev_lock(bp->dev);
15274 bnxt_vf_reps_alloc(bp);
15275 bnxt_vf_reps_open(bp);
15276 netdev_unlock(bp->dev);
15277 break;
15278 }
15279 return;
15280
15281 fw_reset_abort_status:
15282 if (bp->fw_health->status_reliable ||
15283 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15284 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15285
15286 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15287 }
15288 fw_reset_abort:
15289 netdev_lock(bp->dev);
15290 bnxt_fw_reset_abort(bp, rc);
15291 netdev_unlock(bp->dev);
15292 ulp_start:
15293 bnxt_ulp_start(bp, rc);
15294 }
15295
15296 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15297 {
15298 int rc;
15299 struct bnxt *bp = netdev_priv(dev);
15300
15301 SET_NETDEV_DEV(dev, &pdev->dev);
15302
15303 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15304 rc = pci_enable_device(pdev);
15305 if (rc) {
15306 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15307 goto init_err;
15308 }
15309
15310 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15311 dev_err(&pdev->dev,
15312 "Cannot find PCI device base address, aborting\n");
15313 rc = -ENODEV;
15314 goto init_err_disable;
15315 }
15316
15317 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15318 if (rc) {
15319 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15320 goto init_err_disable;
15321 }
15322
15323 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15324 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15325 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15326 rc = -EIO;
15327 goto init_err_release;
15328 }
15329
15330 pci_set_master(pdev);
15331
15332 bp->dev = dev;
15333 bp->pdev = pdev;
15334
15335 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15336 * determines the BAR size.
15337 */
15338 bp->bar0 = pci_ioremap_bar(pdev, 0);
15339 if (!bp->bar0) {
15340 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15341 rc = -ENOMEM;
15342 goto init_err_release;
15343 }
15344
15345 bp->bar2 = pci_ioremap_bar(pdev, 4);
15346 if (!bp->bar2) {
15347 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15348 rc = -ENOMEM;
15349 goto init_err_release;
15350 }
15351
15352 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15353 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15354
15355 spin_lock_init(&bp->ntp_fltr_lock);
15356 #if BITS_PER_LONG == 32
15357 spin_lock_init(&bp->db_lock);
15358 #endif
15359
15360 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15361 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15362
15363 timer_setup(&bp->timer, bnxt_timer, 0);
15364 bp->current_interval = BNXT_TIMER_INTERVAL;
15365
15366 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15367 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15368
15369 clear_bit(BNXT_STATE_OPEN, &bp->state);
15370 return 0;
15371
15372 init_err_release:
15373 bnxt_unmap_bars(bp, pdev);
15374 pci_release_regions(pdev);
15375
15376 init_err_disable:
15377 pci_disable_device(pdev);
15378
15379 init_err:
15380 return rc;
15381 }
15382
15383 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15384 {
15385 struct sockaddr *addr = p;
15386 struct bnxt *bp = netdev_priv(dev);
15387 int rc = 0;
15388
15389 netdev_assert_locked(dev);
15390
15391 if (!is_valid_ether_addr(addr->sa_data))
15392 return -EADDRNOTAVAIL;
15393
15394 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15395 return 0;
15396
15397 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15398 if (rc)
15399 return rc;
15400
15401 eth_hw_addr_set(dev, addr->sa_data);
15402 bnxt_clear_usr_fltrs(bp, true);
15403 if (netif_running(dev)) {
15404 bnxt_close_nic(bp, false, false);
15405 rc = bnxt_open_nic(bp, false, false);
15406 }
15407
15408 return rc;
15409 }
15410
15411 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15412 {
15413 struct bnxt *bp = netdev_priv(dev);
15414
15415 netdev_assert_locked(dev);
15416
15417 if (netif_running(dev))
15418 bnxt_close_nic(bp, true, false);
15419
15420 WRITE_ONCE(dev->mtu, new_mtu);
15421
15422 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15423 * program is attached. We need to set the AGG rings settings and
15424 * rx_skb_func accordingly.
15425 */
15426 if (READ_ONCE(bp->xdp_prog))
15427 bnxt_set_rx_skb_mode(bp, true);
15428
15429 bnxt_set_ring_params(bp);
15430
15431 if (netif_running(dev))
15432 return bnxt_open_nic(bp, true, false);
15433
15434 return 0;
15435 }
15436
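/* Configure @tc traffic classes: validate that enough rings are available,
 * close the device if it is running, resize the TX and completion ring
 * counts for the new TC count and reopen.
 */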
15437 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15438 {
15439 struct bnxt *bp = netdev_priv(dev);
15440 bool sh = false;
15441 int rc, tx_cp;
15442
15443 if (tc > bp->max_tc) {
15444 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15445 tc, bp->max_tc);
15446 return -EINVAL;
15447 }
15448
15449 if (bp->num_tc == tc)
15450 return 0;
15451
15452 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15453 sh = true;
15454
15455 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15456 sh, tc, bp->tx_nr_rings_xdp);
15457 if (rc)
15458 return rc;
15459
15460 /* Needs to close the device and do hw resource re-allocations */
15461 if (netif_running(bp->dev))
15462 bnxt_close_nic(bp, true, false);
15463
15464 if (tc) {
15465 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15466 netdev_set_num_tc(dev, tc);
15467 bp->num_tc = tc;
15468 } else {
15469 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15470 netdev_reset_tc(dev);
15471 bp->num_tc = 0;
15472 }
15473 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15474 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15475 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15476 tx_cp + bp->rx_nr_rings;
15477
15478 if (netif_running(bp->dev))
15479 return bnxt_open_nic(bp, true, false);
15480
15481 return 0;
15482 }
15483
15484 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15485 void *cb_priv)
15486 {
15487 struct bnxt *bp = cb_priv;
15488
15489 if (!bnxt_tc_flower_enabled(bp) ||
15490 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15491 return -EOPNOTSUPP;
15492
15493 switch (type) {
15494 case TC_SETUP_CLSFLOWER:
15495 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15496 default:
15497 return -EOPNOTSUPP;
15498 }
15499 }
15500
15501 LIST_HEAD(bnxt_block_cb_list);
15502
15503 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15504 void *type_data)
15505 {
15506 struct bnxt *bp = netdev_priv(dev);
15507
15508 switch (type) {
15509 case TC_SETUP_BLOCK:
15510 return flow_block_cb_setup_simple(type_data,
15511 &bnxt_block_cb_list,
15512 bnxt_setup_tc_block_cb,
15513 bp, bp, true);
15514 case TC_SETUP_QDISC_MQPRIO: {
15515 struct tc_mqprio_qopt *mqprio = type_data;
15516
15517 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15518
15519 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15520 }
15521 default:
15522 return -EOPNOTSUPP;
15523 }
15524 }
15525
15526 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15527 const struct sk_buff *skb)
15528 {
15529 struct bnxt_vnic_info *vnic;
15530
15531 if (skb)
15532 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15533
15534 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15535 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15536 }
15537
15538 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15539 u32 idx)
15540 {
15541 struct hlist_head *head;
15542 int bit_id;
15543
15544 spin_lock_bh(&bp->ntp_fltr_lock);
15545 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15546 if (bit_id < 0) {
15547 spin_unlock_bh(&bp->ntp_fltr_lock);
15548 return -ENOMEM;
15549 }
15550
15551 fltr->base.sw_id = (u16)bit_id;
15552 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15553 fltr->base.flags |= BNXT_ACT_RING_DST;
15554 head = &bp->ntp_fltr_hash_tbl[idx];
15555 hlist_add_head_rcu(&fltr->base.hash, head);
15556 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15557 bnxt_insert_usr_fltr(bp, &fltr->base);
15558 bp->ntp_fltr_count++;
15559 spin_unlock_bh(&bp->ntp_fltr_lock);
15560 return 0;
15561 }
15562
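/* Return true if two ntuple filters describe the same flow: same L3 protocol
 * and addresses/masks, same L4 protocol and ports/masks, same control flags
 * and the same underlying L2 filter.
 */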
15563 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15564 struct bnxt_ntuple_filter *f2)
15565 {
15566 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15567 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15568 struct flow_keys *keys1 = &f1->fkeys;
15569 struct flow_keys *keys2 = &f2->fkeys;
15570
15571 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15572 keys1->basic.ip_proto != keys2->basic.ip_proto)
15573 return false;
15574
15575 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15576 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15577 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15578 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15579 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15580 return false;
15581 } else {
15582 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15583 &keys2->addrs.v6addrs.src) ||
15584 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15585 &masks2->addrs.v6addrs.src) ||
15586 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15587 &keys2->addrs.v6addrs.dst) ||
15588 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15589 &masks2->addrs.v6addrs.dst))
15590 return false;
15591 }
15592
15593 return keys1->ports.src == keys2->ports.src &&
15594 masks1->ports.src == masks2->ports.src &&
15595 keys1->ports.dst == keys2->ports.dst &&
15596 masks1->ports.dst == masks2->ports.dst &&
15597 keys1->control.flags == keys2->control.flags &&
15598 f1->l2_fltr == f2->l2_fltr;
15599 }
15600
15601 struct bnxt_ntuple_filter *
15602 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15603 struct bnxt_ntuple_filter *fltr, u32 idx)
15604 {
15605 struct bnxt_ntuple_filter *f;
15606 struct hlist_head *head;
15607
15608 head = &bp->ntp_fltr_hash_tbl[idx];
15609 hlist_for_each_entry_rcu(f, head, base.hash) {
15610 if (bnxt_fltr_match(f, fltr))
15611 return f;
15612 }
15613 return NULL;
15614 }
15615
15616 #ifdef CONFIG_RFS_ACCEL
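/* .ndo_rx_flow_steer handler for accelerated RFS.  Dissect the flow, build
 * an ntuple filter targeting @rxq_index and insert it into the hash table;
 * the HWRM filter programming happens later from bnxt_cfg_ntp_filters().
 * Returns the filter's sw_id on success.
 */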
15617 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15618 u16 rxq_index, u32 flow_id)
15619 {
15620 struct bnxt *bp = netdev_priv(dev);
15621 struct bnxt_ntuple_filter *fltr, *new_fltr;
15622 struct flow_keys *fkeys;
15623 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15624 struct bnxt_l2_filter *l2_fltr;
15625 int rc = 0, idx;
15626 u32 flags;
15627
15628 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15629 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15630 atomic_inc(&l2_fltr->refcnt);
15631 } else {
15632 struct bnxt_l2_key key;
15633
15634 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15635 key.vlan = 0;
15636 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15637 if (!l2_fltr)
15638 return -EINVAL;
15639 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15640 bnxt_del_l2_filter(bp, l2_fltr);
15641 return -EINVAL;
15642 }
15643 }
15644 new_fltr = kzalloc_obj(*new_fltr, GFP_ATOMIC);
15645 if (!new_fltr) {
15646 bnxt_del_l2_filter(bp, l2_fltr);
15647 return -ENOMEM;
15648 }
15649
15650 fkeys = &new_fltr->fkeys;
15651 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15652 rc = -EPROTONOSUPPORT;
15653 goto err_free;
15654 }
15655
15656 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15657 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15658 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15659 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15660 rc = -EPROTONOSUPPORT;
15661 goto err_free;
15662 }
15663 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15664 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15665 if (bp->hwrm_spec_code < 0x10601) {
15666 rc = -EPROTONOSUPPORT;
15667 goto err_free;
15668 }
15669 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15670 }
15671 flags = fkeys->control.flags;
15672 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15673 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15674 rc = -EPROTONOSUPPORT;
15675 goto err_free;
15676 }
15677 new_fltr->l2_fltr = l2_fltr;
15678
15679 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15680 rcu_read_lock();
15681 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15682 if (fltr) {
15683 rc = fltr->base.sw_id;
15684 rcu_read_unlock();
15685 goto err_free;
15686 }
15687 rcu_read_unlock();
15688
15689 new_fltr->flow_id = flow_id;
15690 new_fltr->base.rxq = rxq_index;
15691 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15692 if (!rc) {
15693 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15694 return new_fltr->base.sw_id;
15695 }
15696
15697 err_free:
15698 bnxt_del_l2_filter(bp, l2_fltr);
15699 kfree(new_fltr);
15700 return rc;
15701 }
15702 #endif
15703
15704 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15705 {
15706 spin_lock_bh(&bp->ntp_fltr_lock);
15707 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15708 spin_unlock_bh(&bp->ntp_fltr_lock);
15709 return;
15710 }
15711 hlist_del_rcu(&fltr->base.hash);
15712 bnxt_del_one_usr_fltr(bp, &fltr->base);
15713 bp->ntp_fltr_count--;
15714 spin_unlock_bh(&bp->ntp_fltr_lock);
15715 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15716 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15717 kfree_rcu(fltr, base.rcu);
15718 }
15719
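/* Scan the ntuple filter hash table: program newly inserted filters into the
 * hardware via HWRM and age out filters that RPS reports as expired.  Runs
 * from the slow path task when aRFS is enabled.
 */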
15720 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15721 {
15722 #ifdef CONFIG_RFS_ACCEL
15723 int i;
15724
15725 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15726 struct hlist_head *head;
15727 struct hlist_node *tmp;
15728 struct bnxt_ntuple_filter *fltr;
15729 int rc;
15730
15731 head = &bp->ntp_fltr_hash_tbl[i];
15732 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15733 bool del = false;
15734
15735 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15736 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15737 continue;
15738 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15739 fltr->flow_id,
15740 fltr->base.sw_id)) {
15741 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15742 fltr);
15743 del = true;
15744 }
15745 } else {
15746 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15747 fltr);
15748 if (rc)
15749 del = true;
15750 else
15751 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15752 }
15753
15754 if (del)
15755 bnxt_del_ntp_filter(bp, fltr);
15756 }
15757 }
15758 #endif
15759 }
15760
15761 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15762 unsigned int entry, struct udp_tunnel_info *ti)
15763 {
15764 struct bnxt *bp = netdev_priv(netdev);
15765 unsigned int cmd;
15766
15767 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15768 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15769 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15770 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15771 else
15772 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15773
15774 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15775 }
15776
15777 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15778 unsigned int entry, struct udp_tunnel_info *ti)
15779 {
15780 struct bnxt *bp = netdev_priv(netdev);
15781 unsigned int cmd;
15782
15783 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15784 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15785 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15786 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15787 else
15788 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15789
15790 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15791 }
15792
15793 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15794 .set_port = bnxt_udp_tunnel_set_port,
15795 .unset_port = bnxt_udp_tunnel_unset_port,
15796 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15797 .tables = {
15798 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15799 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15800 },
15801 }, bnxt_udp_tunnels_p7 = {
15802 .set_port = bnxt_udp_tunnel_set_port,
15803 .unset_port = bnxt_udp_tunnel_unset_port,
15804 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15805 .tables = {
15806 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15807 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15808 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15809 },
15810 };
15811
15812 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15813 struct net_device *dev, u32 filter_mask,
15814 int nlflags)
15815 {
15816 struct bnxt *bp = netdev_priv(dev);
15817
15818 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15819 nlflags, filter_mask, NULL);
15820 }
15821
15822 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15823 u16 flags, struct netlink_ext_ack *extack)
15824 {
15825 struct bnxt *bp = netdev_priv(dev);
15826 struct nlattr *attr, *br_spec;
15827 int rem, rc = 0;
15828
15829 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15830 return -EOPNOTSUPP;
15831
15832 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15833 if (!br_spec)
15834 return -EINVAL;
15835
15836 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15837 u16 mode;
15838
15839 mode = nla_get_u16(attr);
15840 if (mode == bp->br_mode)
15841 break;
15842
15843 rc = bnxt_hwrm_set_br_mode(bp, mode);
15844 if (!rc)
15845 bp->br_mode = mode;
15846 break;
15847 }
15848 return rc;
15849 }
15850
15851 int bnxt_get_port_parent_id(struct net_device *dev,
15852 struct netdev_phys_item_id *ppid)
15853 {
15854 struct bnxt *bp = netdev_priv(dev);
15855
15856 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15857 return -EOPNOTSUPP;
15858
15859 /* The PF and its VF-reps only support the switchdev framework */
15860 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15861 return -EOPNOTSUPP;
15862
15863 ppid->id_len = sizeof(bp->dsn);
15864 memcpy(ppid->id, bp->dsn, ppid->id_len);
15865
15866 return 0;
15867 }
15868
15869 static const struct net_device_ops bnxt_netdev_ops = {
15870 .ndo_open = bnxt_open,
15871 .ndo_start_xmit = bnxt_start_xmit,
15872 .ndo_stop = bnxt_close,
15873 .ndo_get_stats64 = bnxt_get_stats64,
15874 .ndo_set_rx_mode = bnxt_set_rx_mode,
15875 .ndo_eth_ioctl = bnxt_ioctl,
15876 .ndo_validate_addr = eth_validate_addr,
15877 .ndo_set_mac_address = bnxt_change_mac_addr,
15878 .ndo_change_mtu = bnxt_change_mtu,
15879 .ndo_fix_features = bnxt_fix_features,
15880 .ndo_set_features = bnxt_set_features,
15881 .ndo_features_check = bnxt_features_check,
15882 .ndo_tx_timeout = bnxt_tx_timeout,
15883 #ifdef CONFIG_BNXT_SRIOV
15884 .ndo_get_vf_config = bnxt_get_vf_config,
15885 .ndo_set_vf_mac = bnxt_set_vf_mac,
15886 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15887 .ndo_set_vf_rate = bnxt_set_vf_bw,
15888 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15889 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15890 .ndo_set_vf_trust = bnxt_set_vf_trust,
15891 #endif
15892 .ndo_setup_tc = bnxt_setup_tc,
15893 #ifdef CONFIG_RFS_ACCEL
15894 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15895 #endif
15896 .ndo_bpf = bnxt_xdp,
15897 .ndo_xdp_xmit = bnxt_xdp_xmit,
15898 .ndo_bridge_getlink = bnxt_bridge_getlink,
15899 .ndo_bridge_setlink = bnxt_bridge_setlink,
15900 .ndo_hwtstamp_get = bnxt_hwtstamp_get,
15901 .ndo_hwtstamp_set = bnxt_hwtstamp_set,
15902 };
15903
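/* Per-queue RX stats for the netdev qstats API, summed from the
 * completion ring's software copy of the hardware counters.
 */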
15904 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15905 struct netdev_queue_stats_rx *stats)
15906 {
15907 struct bnxt *bp = netdev_priv(dev);
15908 struct bnxt_cp_ring_info *cpr;
15909 u64 *sw;
15910
15911 if (!bp->bnapi)
15912 return;
15913
15914 cpr = &bp->bnapi[i]->cp_ring;
15915 sw = cpr->stats.sw_stats;
15916
15917 stats->packets = 0;
15918 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15919 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15920 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15921
15922 stats->bytes = 0;
15923 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15924 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15925 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15926
15927 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15928 stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
15929 stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
15930 }
15931
15932 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15933 struct netdev_queue_stats_tx *stats)
15934 {
15935 struct bnxt *bp = netdev_priv(dev);
15936 struct bnxt_napi *bnapi;
15937 u64 *sw;
15938
15939 if (!bp->tx_ring)
15940 return;
15941
15942 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15943 sw = bnapi->cp_ring.stats.sw_stats;
15944
15945 stats->packets = 0;
15946 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15947 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15948 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15949
15950 stats->bytes = 0;
15951 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15952 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15953 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15954 }
15955
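/* Base stats report the counters accumulated before the current
 * queues were created, taken from the saved "prev" snapshots.
 */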
15956 static void bnxt_get_base_stats(struct net_device *dev,
15957 struct netdev_queue_stats_rx *rx,
15958 struct netdev_queue_stats_tx *tx)
15959 {
15960 struct bnxt *bp = netdev_priv(dev);
15961
15962 rx->packets = bp->net_stats_prev.rx_packets;
15963 rx->bytes = bp->net_stats_prev.rx_bytes;
15964 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15965 rx->hw_gro_packets = bp->ring_err_stats_prev.rx_total_hw_gro_packets;
15966 rx->hw_gro_wire_packets = bp->ring_err_stats_prev.rx_total_hw_gro_wire_packets;
15967
15968 tx->packets = bp->net_stats_prev.tx_packets;
15969 tx->bytes = bp->net_stats_prev.tx_bytes;
15970 }
15971
15972 static const struct netdev_stat_ops bnxt_stat_ops = {
15973 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15974 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15975 .get_base_stats = bnxt_get_base_stats,
15976 };
15977
15978 static void bnxt_queue_default_qcfg(struct net_device *dev,
15979 struct netdev_queue_config *qcfg)
15980 {
15981 qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
15982 }
15983
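/* rx_page_size must be a power of 2 in the range
 * [BNXT_RX_PAGE_SIZE, BNXT_MAX_RX_PAGE_SIZE]; only P5_PLUS chips
 * support a non-default value.
 */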
15984 static int bnxt_validate_qcfg(struct net_device *dev,
15985 struct netdev_queue_config *qcfg,
15986 struct netlink_ext_ack *extack)
15987 {
15988 struct bnxt *bp = netdev_priv(dev);
15989
15990 /* Older chips need the MSS calculation, so a non-default rx_page_size is not supported */
15991 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
15992 qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
15993 return -EINVAL;
15994
15995 if (!is_power_of_2(qcfg->rx_page_size))
15996 return -ERANGE;
15997
15998 if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
15999 qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
16000 return -ERANGE;
16001
16002 return 0;
16003 }
16004
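/* Queue management API: build a shadow copy of the RX ring in qmem
 * with fresh page pools, ring memory, agg bitmap and TPA state so it
 * can later be swapped in by bnxt_queue_start().
 */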
16005 static int bnxt_queue_mem_alloc(struct net_device *dev,
16006 struct netdev_queue_config *qcfg,
16007 void *qmem, int idx)
16008 {
16009 struct bnxt_rx_ring_info *rxr, *clone;
16010 struct bnxt *bp = netdev_priv(dev);
16011 struct bnxt_ring_struct *ring;
16012 int rc;
16013
16014 if (!bp->rx_ring)
16015 return -ENETDOWN;
16016
16017 rxr = &bp->rx_ring[idx];
16018 clone = qmem;
16019 memcpy(clone, rxr, sizeof(*rxr));
16020 bnxt_init_rx_ring_struct(bp, clone);
16021 bnxt_reset_rx_ring_struct(bp, clone);
16022
16023 clone->rx_prod = 0;
16024 clone->rx_agg_prod = 0;
16025 clone->rx_sw_agg_prod = 0;
16026 clone->rx_next_cons = 0;
16027 clone->need_head_pool = false;
16028 clone->rx_page_size = qcfg->rx_page_size;
16029
16030 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16031 if (rc)
16032 return rc;
16033
16034 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16035 if (rc < 0)
16036 goto err_page_pool_destroy;
16037
16038 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16039 MEM_TYPE_PAGE_POOL,
16040 clone->page_pool);
16041 if (rc)
16042 goto err_rxq_info_unreg;
16043
16044 ring = &clone->rx_ring_struct;
16045 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16046 if (rc)
16047 goto err_free_rx_ring;
16048
16049 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16050 ring = &clone->rx_agg_ring_struct;
16051 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16052 if (rc)
16053 goto err_free_rx_agg_ring;
16054
16055 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16056 if (rc)
16057 goto err_free_rx_agg_ring;
16058 }
16059
16060 if (bp->flags & BNXT_FLAG_TPA) {
16061 rc = bnxt_alloc_one_tpa_info(bp, clone);
16062 if (rc)
16063 goto err_free_tpa_info;
16064 }
16065
16066 bnxt_init_one_rx_ring_rxbd(bp, clone);
16067 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16068
16069 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16070 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16071 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16072 if (bp->flags & BNXT_FLAG_TPA)
16073 bnxt_alloc_one_tpa_info_data(bp, clone);
16074
16075 return 0;
16076
16077 err_free_tpa_info:
16078 bnxt_free_one_tpa_info(bp, clone);
16079 err_free_rx_agg_ring:
16080 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16081 err_free_rx_ring:
16082 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16083 err_rxq_info_unreg:
16084 xdp_rxq_info_unreg(&clone->xdp_rxq);
16085 err_page_pool_destroy:
16086 page_pool_destroy(clone->page_pool);
16087 page_pool_destroy(clone->head_pool);
16088 clone->page_pool = NULL;
16089 clone->head_pool = NULL;
16090 return rc;
16091 }
16092
16093 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16094 {
16095 struct bnxt_rx_ring_info *rxr = qmem;
16096 struct bnxt *bp = netdev_priv(dev);
16097 struct bnxt_ring_struct *ring;
16098
16099 bnxt_free_one_rx_ring_skbs(bp, rxr);
16100 bnxt_free_one_tpa_info(bp, rxr);
16101
16102 xdp_rxq_info_unreg(&rxr->xdp_rxq);
16103
16104 page_pool_destroy(rxr->page_pool);
16105 page_pool_destroy(rxr->head_pool);
16106 rxr->page_pool = NULL;
16107 rxr->head_pool = NULL;
16108
16109 ring = &rxr->rx_ring_struct;
16110 bnxt_free_ring(bp, &ring->ring_mem);
16111
16112 ring = &rxr->rx_agg_ring_struct;
16113 bnxt_free_ring(bp, &ring->ring_mem);
16114
16115 kfree(rxr->rx_agg_bmap);
16116 rxr->rx_agg_bmap = NULL;
16117 }
16118
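/* Copy the ring memory pointers (and aggregation ring state, if
 * enabled) from src to dst; the WARN_ONs check that both rings were
 * sized identically.
 */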
16119 static void bnxt_copy_rx_ring(struct bnxt *bp,
16120 struct bnxt_rx_ring_info *dst,
16121 struct bnxt_rx_ring_info *src)
16122 {
16123 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16124 struct bnxt_ring_struct *dst_ring, *src_ring;
16125 int i;
16126
16127 dst_ring = &dst->rx_ring_struct;
16128 dst_rmem = &dst_ring->ring_mem;
16129 src_ring = &src->rx_ring_struct;
16130 src_rmem = &src_ring->ring_mem;
16131
16132 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16133 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16134 WARN_ON(dst_rmem->flags != src_rmem->flags);
16135 WARN_ON(dst_rmem->depth != src_rmem->depth);
16136 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16137 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16138
16139 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16140 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16141 *dst_rmem->vmem = *src_rmem->vmem;
16142 for (i = 0; i < dst_rmem->nr_pages; i++) {
16143 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16144 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16145 }
16146
16147 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16148 return;
16149
16150 dst_ring = &dst->rx_agg_ring_struct;
16151 dst_rmem = &dst_ring->ring_mem;
16152 src_ring = &src->rx_agg_ring_struct;
16153 src_rmem = &src_ring->ring_mem;
16154
16155 dst->rx_page_size = src->rx_page_size;
16156
16157 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16158 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16159 WARN_ON(dst_rmem->flags != src_rmem->flags);
16160 WARN_ON(dst_rmem->depth != src_rmem->depth);
16161 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16162 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16163 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16164
16165 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16166 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16167 *dst_rmem->vmem = *src_rmem->vmem;
16168 for (i = 0; i < dst_rmem->nr_pages; i++) {
16169 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16170 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16171 }
16172
16173 dst->rx_agg_bmap = src->rx_agg_bmap;
16174 }
16175
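/* Adopt the pre-allocated clone into the live RX ring, re-create the
 * rings in firmware, re-arm the doorbells and NAPI, and restore the
 * VNIC MRU so traffic can resume on this queue.
 */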
16176 static int bnxt_queue_start(struct net_device *dev,
16177 struct netdev_queue_config *qcfg,
16178 void *qmem, int idx)
16179 {
16180 struct bnxt *bp = netdev_priv(dev);
16181 struct bnxt_rx_ring_info *rxr, *clone;
16182 struct bnxt_cp_ring_info *cpr;
16183 struct bnxt_vnic_info *vnic;
16184 struct bnxt_napi *bnapi;
16185 int i, rc;
16186 u16 mru;
16187
16188 rxr = &bp->rx_ring[idx];
16189 clone = qmem;
16190
16191 rxr->rx_prod = clone->rx_prod;
16192 rxr->rx_agg_prod = clone->rx_agg_prod;
16193 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16194 rxr->rx_next_cons = clone->rx_next_cons;
16195 rxr->rx_tpa = clone->rx_tpa;
16196 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16197 rxr->page_pool = clone->page_pool;
16198 rxr->head_pool = clone->head_pool;
16199 rxr->xdp_rxq = clone->xdp_rxq;
16200 rxr->need_head_pool = clone->need_head_pool;
16201
16202 bnxt_copy_rx_ring(bp, rxr, clone);
16203
16204 bnapi = rxr->bnapi;
16205 cpr = &bnapi->cp_ring;
16206
16207 /* All rings have been reserved and previously allocated.
16208 * Reallocating with the same parameters should never fail.
16209 */
16210 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16211 if (rc)
16212 goto err_reset;
16213
16214 if (bp->tph_mode) {
16215 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16216 if (rc)
16217 goto err_reset;
16218 }
16219
16220 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16221 if (rc)
16222 goto err_reset;
16223
16224 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16225 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16226 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16227
16228 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16229 rc = bnxt_tx_queue_start(bp, idx);
16230 if (rc)
16231 goto err_reset;
16232 }
16233
16234 bnxt_enable_rx_page_pool(rxr);
16235 napi_enable_locked(&bnapi->napi);
16236 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16237
16238 mru = bp->dev->mtu + VLAN_ETH_HLEN;
16239 for (i = 0; i < bp->nr_vnics; i++) {
16240 vnic = &bp->vnic_info[i];
16241
16242 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16243 if (rc)
16244 return rc;
16245 }
16246 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16247
16248 err_reset:
16249 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16250 rc);
16251 napi_enable_locked(&bnapi->napi);
16252 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16253 bnxt_reset_task(bp, true);
16254 return rc;
16255 }
16256
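/* Quiesce one RX queue: clear the VNIC MRU so the queue is no longer
 * used, free the firmware rings, stop NAPI, and save the ring state
 * into qmem so it can be released later via the queue memory free path.
 */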
16257 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16258 {
16259 struct bnxt *bp = netdev_priv(dev);
16260 struct bnxt_rx_ring_info *rxr;
16261 struct bnxt_cp_ring_info *cpr;
16262 struct bnxt_vnic_info *vnic;
16263 struct bnxt_napi *bnapi;
16264 int i;
16265
16266 for (i = 0; i < bp->nr_vnics; i++) {
16267 vnic = &bp->vnic_info[i];
16268
16269 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16270 }
16271 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16272 /* Make sure NAPI sees that the VNIC is disabled */
16273 synchronize_net();
16274 rxr = &bp->rx_ring[idx];
16275 bnapi = rxr->bnapi;
16276 cpr = &bnapi->cp_ring;
16277 cancel_work_sync(&cpr->dim.work);
16278 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16279 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16280 page_pool_disable_direct_recycling(rxr->page_pool);
16281 if (bnxt_separate_head_pool(rxr))
16282 page_pool_disable_direct_recycling(rxr->head_pool);
16283
16284 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16285 bnxt_tx_queue_stop(bp, idx);
16286
16287 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16288 * completion is handled in NAPI to guarantee no more DMA on that ring
16289 * after seeing the completion.
16290 */
16291 napi_disable_locked(&bnapi->napi);
16292
16293 if (bp->tph_mode) {
16294 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16295 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16296 }
16297 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16298
16299 memcpy(qmem, rxr, sizeof(*rxr));
16300 bnxt_init_rx_ring_struct(bp, qmem);
16301
16302 return 0;
16303 }
16304
16305 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16306 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16307 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16308 .ndo_queue_mem_free = bnxt_queue_mem_free,
16309 .ndo_queue_start = bnxt_queue_start,
16310 .ndo_queue_stop = bnxt_queue_stop,
16311 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16312 .ndo_validate_qcfg = bnxt_validate_qcfg,
16313 .supported_params = QCFG_RX_PAGE_SIZE,
16314 };
16315
16316 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16317 .ndo_default_qcfg = bnxt_queue_default_qcfg,
16318 };
16319
16320 static void bnxt_remove_one(struct pci_dev *pdev)
16321 {
16322 struct net_device *dev = pci_get_drvdata(pdev);
16323 struct bnxt *bp = netdev_priv(dev);
16324
16325 if (BNXT_PF(bp))
16326 __bnxt_sriov_disable(bp);
16327
16328 bnxt_rdma_aux_device_del(bp);
16329
16330 unregister_netdev(dev);
16331 bnxt_ptp_clear(bp);
16332
16333 bnxt_rdma_aux_device_uninit(bp);
16334
16335 bnxt_free_l2_filters(bp, true);
16336 bnxt_free_ntp_fltrs(bp, true);
16337 WARN_ON(bp->num_rss_ctx);
16338 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16339 /* Flush any pending tasks */
16340 cancel_work_sync(&bp->sp_task);
16341 cancel_delayed_work_sync(&bp->fw_reset_task);
16342 bp->sp_event = 0;
16343
16344 bnxt_dl_fw_reporters_destroy(bp);
16345 bnxt_dl_unregister(bp);
16346 bnxt_shutdown_tc(bp);
16347
16348 bnxt_clear_int_mode(bp);
16349 bnxt_hwrm_func_drv_unrgtr(bp);
16350 bnxt_free_hwrm_resources(bp);
16351 bnxt_hwmon_uninit(bp);
16352 bnxt_ethtool_free(bp);
16353 bnxt_dcb_free(bp);
16354 kfree(bp->ptp_cfg);
16355 bp->ptp_cfg = NULL;
16356 kfree(bp->fw_health);
16357 bp->fw_health = NULL;
16358 bnxt_cleanup_pci(bp);
16359 bnxt_free_ctx_mem(bp, true);
16360 bnxt_free_crash_dump_mem(bp);
16361 kfree(bp->rss_indir_tbl);
16362 bp->rss_indir_tbl = NULL;
16363 bnxt_free_port_stats(bp);
16364 free_netdev(dev);
16365 }
16366
16367 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16368 {
16369 int rc = 0;
16370 struct bnxt_link_info *link_info = &bp->link_info;
16371
16372 bp->phy_flags = 0;
16373 rc = bnxt_hwrm_phy_qcaps(bp);
16374 if (rc) {
16375 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16376 rc);
16377 return rc;
16378 }
16379 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16380 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16381 else
16382 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16383
16384 bp->mac_flags = 0;
16385 bnxt_hwrm_mac_qcaps(bp);
16386
16387 if (!fw_dflt)
16388 return 0;
16389
16390 mutex_lock(&bp->link_lock);
16391 rc = bnxt_update_link(bp, false);
16392 if (rc) {
16393 mutex_unlock(&bp->link_lock);
16394 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16395 rc);
16396 return rc;
16397 }
16398
16399 /* Older firmware does not have supported_auto_speeds, so assume
16400 * that all supported speeds can be autonegotiated.
16401 */
16402 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16403 link_info->support_auto_speeds = link_info->support_speeds;
16404
16405 bnxt_init_ethtool_link_settings(bp);
16406 mutex_unlock(&bp->link_lock);
16407 return 0;
16408 }
16409
16410 static int bnxt_get_max_irq(struct pci_dev *pdev)
16411 {
16412 u16 ctrl;
16413
16414 if (!pdev->msix_cap)
16415 return 1;
16416
16417 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16418 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16419 }
16420
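/* Derive the maximum usable RX/TX/completion rings from the
 * firmware-reported limits, accounting for available interrupts,
 * stat contexts and aggregation rings.
 */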
16421 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16422 int *max_cp)
16423 {
16424 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16425 int max_ring_grps = 0, max_irq;
16426
16427 *max_tx = hw_resc->max_tx_rings;
16428 *max_rx = hw_resc->max_rx_rings;
16429 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16430 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16431 bnxt_get_ulp_msix_num_in_use(bp),
16432 hw_resc->max_stat_ctxs -
16433 bnxt_get_ulp_stat_ctxs_in_use(bp));
16434 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16435 *max_cp = min_t(int, *max_cp, max_irq);
16436 max_ring_grps = hw_resc->max_hw_ring_grps;
16437 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16438 *max_cp -= 1;
16439 *max_rx -= 2;
16440 }
16441 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16442 *max_rx >>= 1;
16443 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16444 int rc;
16445
16446 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16447 if (rc) {
16448 *max_rx = 0;
16449 *max_tx = 0;
16450 }
16451 /* On P5 chips, max_cp output param should be available NQs */
16452 *max_cp = max_irq;
16453 }
16454 *max_rx = min_t(int, *max_rx, max_ring_grps);
16455 }
16456
16457 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16458 {
16459 int rx, tx, cp;
16460
16461 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16462 *max_rx = rx;
16463 *max_tx = tx;
16464 if (!rx || !tx || !cp)
16465 return -ENOMEM;
16466
16467 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16468 }
16469
16470 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16471 bool shared)
16472 {
16473 int rc;
16474
16475 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16476 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16477 /* Not enough rings, try disabling agg rings. */
16478 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16479 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16480 if (rc) {
16481 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16482 bp->flags |= BNXT_FLAG_AGG_RINGS;
16483 return rc;
16484 }
16485 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16486 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16487 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16488 bnxt_set_ring_params(bp);
16489 }
16490
16491 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16492 int max_cp, max_stat, max_irq;
16493
16494 /* Reserve minimum resources for RoCE */
16495 max_cp = bnxt_get_max_func_cp_rings(bp);
16496 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16497 max_irq = bnxt_get_max_func_irqs(bp);
16498 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16499 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16500 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16501 return 0;
16502
16503 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16504 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16505 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16506 max_cp = min_t(int, max_cp, max_irq);
16507 max_cp = min_t(int, max_cp, max_stat);
16508 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16509 if (rc)
16510 rc = 0;
16511 }
16512 return rc;
16513 }
16514
16515 /* In the initial default shared-ring configuration, each shared ring
16516  * must have an RX/TX ring pair.
16517  */
16518 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16519 {
16520 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16521 bp->rx_nr_rings = bp->cp_nr_rings;
16522 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16523 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16524 }
16525
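/* Choose the default ring counts: start from the default RSS queue
 * count (1 in a kdump kernel), scale down on multi-port cards, cap by
 * hardware limits, then reserve the rings plus any RoCE ULP MSI-X.
 */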
16526 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16527 {
16528 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16529 int avail_msix;
16530
16531 if (!bnxt_can_reserve_rings(bp))
16532 return 0;
16533
16534 if (sh)
16535 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16536 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16537 /* Reduce default rings on multi-port cards so that total default
16538 * rings do not exceed CPU count.
16539 */
16540 if (bp->port_count > 1) {
16541 int max_rings =
16542 max_t(int, num_online_cpus() / bp->port_count, 1);
16543
16544 dflt_rings = min_t(int, dflt_rings, max_rings);
16545 }
16546 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16547 if (rc)
16548 return rc;
16549 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16550 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16551 if (sh)
16552 bnxt_trim_dflt_sh_rings(bp);
16553 else
16554 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16555 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16556
16557 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16558 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16559 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16560
16561 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16562 bnxt_set_dflt_ulp_stat_ctxs(bp);
16563 }
16564
16565 rc = __bnxt_reserve_rings(bp);
16566 if (rc && rc != -ENODEV)
16567 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16568 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16569 if (sh)
16570 bnxt_trim_dflt_sh_rings(bp);
16571
16572 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16573 if (bnxt_need_reserve_rings(bp)) {
16574 rc = __bnxt_reserve_rings(bp);
16575 if (rc && rc != -ENODEV)
16576 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16577 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16578 }
16579 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16580 bp->rx_nr_rings++;
16581 bp->cp_nr_rings++;
16582 }
16583 if (rc) {
16584 bp->tx_nr_rings = 0;
16585 bp->rx_nr_rings = 0;
16586 }
16587 return rc;
16588 }
16589
16590 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16591 {
16592 int rc;
16593
16594 if (bp->tx_nr_rings)
16595 return 0;
16596
16597 bnxt_ulp_irq_stop(bp);
16598 bnxt_clear_int_mode(bp);
16599 rc = bnxt_set_dflt_rings(bp, true);
16600 if (rc) {
16601 if (BNXT_VF(bp) && rc == -ENODEV)
16602 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16603 else
16604 netdev_err(bp->dev, "Not enough rings available.\n");
16605 goto init_dflt_ring_err;
16606 }
16607 rc = bnxt_init_int_mode(bp);
16608 if (rc)
16609 goto init_dflt_ring_err;
16610
16611 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16612
16613 bnxt_set_dflt_rfs(bp);
16614
16615 init_dflt_ring_err:
16616 bnxt_ulp_irq_restart(bp, rc);
16617 return rc;
16618 }
16619
16620 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16621 {
16622 int rc;
16623
16624 netdev_ops_assert_locked(bp->dev);
16625 bnxt_hwrm_func_qcaps(bp);
16626
16627 if (netif_running(bp->dev))
16628 __bnxt_close_nic(bp, true, false);
16629
16630 bnxt_ulp_irq_stop(bp);
16631 bnxt_clear_int_mode(bp);
16632 rc = bnxt_init_int_mode(bp);
16633 bnxt_ulp_irq_restart(bp, rc);
16634
16635 if (netif_running(bp->dev)) {
16636 if (rc)
16637 netif_close(bp->dev);
16638 else
16639 rc = bnxt_open_nic(bp, true, false);
16640 }
16641
16642 return rc;
16643 }
16644
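/* Use the PF MAC address, or for a VF the admin-assigned MAC if
 * valid (falling back to a random address), then request PF approval.
 */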
16645 static int bnxt_init_mac_addr(struct bnxt *bp)
16646 {
16647 int rc = 0;
16648
16649 if (BNXT_PF(bp)) {
16650 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16651 } else {
16652 #ifdef CONFIG_BNXT_SRIOV
16653 struct bnxt_vf_info *vf = &bp->vf;
16654 bool strict_approval = true;
16655
16656 if (is_valid_ether_addr(vf->mac_addr)) {
16657 /* overwrite netdev dev_addr with admin VF MAC */
16658 eth_hw_addr_set(bp->dev, vf->mac_addr);
16659 /* Older PF driver or firmware may not approve this
16660 * correctly.
16661 */
16662 strict_approval = false;
16663 } else {
16664 eth_hw_addr_random(bp->dev);
16665 }
16666 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16667 #endif
16668 }
16669 return rc;
16670 }
16671
16672 static void bnxt_vpd_read_info(struct bnxt *bp)
16673 {
16674 struct pci_dev *pdev = bp->pdev;
16675 unsigned int vpd_size, kw_len;
16676 int pos, size;
16677 u8 *vpd_data;
16678
16679 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16680 if (IS_ERR(vpd_data)) {
16681 pci_warn(pdev, "Unable to read VPD\n");
16682 return;
16683 }
16684
16685 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16686 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16687 if (pos < 0)
16688 goto read_sn;
16689
16690 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16691 memcpy(bp->board_partno, &vpd_data[pos], size);
16692
16693 read_sn:
16694 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16695 PCI_VPD_RO_KEYWORD_SERIALNO,
16696 &kw_len);
16697 if (pos < 0)
16698 goto exit;
16699
16700 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16701 memcpy(bp->board_serialno, &vpd_data[pos], size);
16702 exit:
16703 kfree(vpd_data);
16704 }
16705
16706 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16707 {
16708 struct pci_dev *pdev = bp->pdev;
16709 u64 qword;
16710
16711 qword = pci_get_dsn(pdev);
16712 if (!qword) {
16713 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16714 return -EOPNOTSUPP;
16715 }
16716
16717 put_unaligned_le64(qword, dsn);
16718
16719 bp->flags |= BNXT_FLAG_DSN_VALID;
16720 return 0;
16721 }
16722
16723 static int bnxt_map_db_bar(struct bnxt *bp)
16724 {
16725 if (!bp->db_size)
16726 return -ENODEV;
16727 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16728 if (!bp->bar1)
16729 return -ENOMEM;
16730 return 0;
16731 }
16732
16733 void bnxt_print_device_info(struct bnxt *bp)
16734 {
16735 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16736 board_info[bp->board_idx].name,
16737 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16738
16739 pcie_print_link_status(bp->pdev);
16740 }
16741
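/* PCI probe: allocate the netdev, query firmware capabilities, set up
 * features, default rings and interrupts, then register the device.
 */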
16742 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16743 {
16744 struct bnxt_hw_resc *hw_resc;
16745 struct net_device *dev;
16746 struct bnxt *bp;
16747 int rc, max_irqs;
16748
16749 if (pci_is_bridge(pdev))
16750 return -ENODEV;
16751
16752 if (!pdev->msix_cap) {
16753 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16754 return -ENODEV;
16755 }
16756
16757 /* Clear any DMA transactions left pending by the crash kernel
16758  * while loading the driver in the capture (kdump) kernel.
16759  */
16760 if (is_kdump_kernel()) {
16761 pci_clear_master(pdev);
16762 pcie_flr(pdev);
16763 }
16764
16765 max_irqs = bnxt_get_max_irq(pdev);
16766 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16767 max_irqs);
16768 if (!dev)
16769 return -ENOMEM;
16770
16771 bp = netdev_priv(dev);
16772 bp->board_idx = ent->driver_data;
16773 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16774 bnxt_set_max_func_irqs(bp, max_irqs);
16775
16776 if (bnxt_vf_pciid(bp->board_idx))
16777 bp->flags |= BNXT_FLAG_VF;
16778
16779 /* No devlink port registration in case of a VF */
16780 if (BNXT_PF(bp))
16781 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16782
16783 rc = bnxt_init_board(pdev, dev);
16784 if (rc < 0)
16785 goto init_err_free;
16786
16787 dev->netdev_ops = &bnxt_netdev_ops;
16788 dev->stat_ops = &bnxt_stat_ops;
16789 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16790 dev->ethtool_ops = &bnxt_ethtool_ops;
16791 pci_set_drvdata(pdev, dev);
16792
16793 rc = bnxt_alloc_hwrm_resources(bp);
16794 if (rc)
16795 goto init_err_pci_clean;
16796
16797 mutex_init(&bp->hwrm_cmd_lock);
16798 mutex_init(&bp->link_lock);
16799
16800 rc = bnxt_fw_init_one_p1(bp);
16801 if (rc)
16802 goto init_err_pci_clean;
16803
16804 if (BNXT_PF(bp))
16805 bnxt_vpd_read_info(bp);
16806
16807 if (BNXT_CHIP_P5_PLUS(bp)) {
16808 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16809 if (BNXT_CHIP_P7(bp))
16810 bp->flags |= BNXT_FLAG_CHIP_P7;
16811 }
16812
16813 rc = bnxt_alloc_rss_indir_tbl(bp);
16814 if (rc)
16815 goto init_err_pci_clean;
16816
16817 rc = bnxt_fw_init_one_p2(bp);
16818 if (rc)
16819 goto init_err_pci_clean;
16820
16821 rc = bnxt_map_db_bar(bp);
16822 if (rc) {
16823 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16824 rc);
16825 goto init_err_pci_clean;
16826 }
16827
16828 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16829 NETIF_F_TSO | NETIF_F_TSO6 |
16830 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16831 NETIF_F_GSO_IPXIP4 |
16832 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16833 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16834 NETIF_F_RXCSUM | NETIF_F_GRO;
16835 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16836 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16837
16838 if (BNXT_SUPPORTS_TPA(bp))
16839 dev->hw_features |= NETIF_F_LRO;
16840
16841 dev->hw_enc_features =
16842 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16843 NETIF_F_TSO | NETIF_F_TSO6 |
16844 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16845 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16846 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16847 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16848 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16849 if (bp->flags & BNXT_FLAG_CHIP_P7)
16850 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16851 else
16852 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16853
16854 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16855 NETIF_F_GSO_GRE_CSUM;
16856 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16857 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16858 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16859 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16860 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16861 if (BNXT_SUPPORTS_TPA(bp))
16862 dev->hw_features |= NETIF_F_GRO_HW;
16863 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16864 if (dev->features & NETIF_F_GRO_HW)
16865 dev->features &= ~NETIF_F_LRO;
16866 dev->priv_flags |= IFF_UNICAST_FLT;
16867
16868 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16869 if (bp->tso_max_segs)
16870 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16871
16872 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16873 NETDEV_XDP_ACT_RX_SG;
16874
16875 #ifdef CONFIG_BNXT_SRIOV
16876 init_waitqueue_head(&bp->sriov_cfg_wait);
16877 #endif
16878 if (BNXT_SUPPORTS_TPA(bp)) {
16879 bp->gro_func = bnxt_gro_func_5730x;
16880 if (BNXT_CHIP_P4(bp))
16881 bp->gro_func = bnxt_gro_func_5731x;
16882 else if (BNXT_CHIP_P5_PLUS(bp))
16883 bp->gro_func = bnxt_gro_func_5750x;
16884 }
16885 if (!BNXT_CHIP_P4_PLUS(bp))
16886 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16887
16888 rc = bnxt_init_mac_addr(bp);
16889 if (rc) {
16890 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16891 rc = -EADDRNOTAVAIL;
16892 goto init_err_pci_clean;
16893 }
16894
16895 if (BNXT_PF(bp)) {
16896 /* Read the adapter's DSN to use as the eswitch switch_id */
16897 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16898 }
16899
16900 /* MTU range: 60 - FW defined max */
16901 dev->min_mtu = ETH_ZLEN;
16902 dev->max_mtu = bp->max_mtu;
16903
16904 rc = bnxt_probe_phy(bp, true);
16905 if (rc)
16906 goto init_err_pci_clean;
16907
16908 hw_resc = &bp->hw_resc;
16909 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16910 BNXT_L2_FLTR_MAX_FLTR;
16911 /* Older firmware may not report these filters properly */
16912 if (bp->max_fltr < BNXT_MAX_FLTR)
16913 bp->max_fltr = BNXT_MAX_FLTR;
16914 bnxt_init_l2_fltr_tbl(bp);
16915 __bnxt_set_rx_skb_mode(bp, false);
16916 bnxt_set_tpa_flags(bp);
16917 bnxt_init_ring_params(bp);
16918 bnxt_set_ring_params(bp);
16919 bnxt_rdma_aux_device_init(bp);
16920 rc = bnxt_set_dflt_rings(bp, true);
16921 if (rc) {
16922 if (BNXT_VF(bp) && rc == -ENODEV) {
16923 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16924 } else {
16925 netdev_err(bp->dev, "Not enough rings available.\n");
16926 rc = -ENOMEM;
16927 }
16928 goto init_err_pci_clean;
16929 }
16930
16931 bnxt_fw_init_one_p3(bp);
16932
16933 bnxt_init_dflt_coal(bp);
16934
16935 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16936 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16937
16938 rc = bnxt_init_int_mode(bp);
16939 if (rc)
16940 goto init_err_pci_clean;
16941
16942 /* No TC has been set yet and rings may have been trimmed due to
16943 * limited MSIX, so we re-initialize the TX rings per TC.
16944 */
16945 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16946
16947 if (BNXT_PF(bp)) {
16948 if (!bnxt_pf_wq) {
16949 bnxt_pf_wq =
16950 create_singlethread_workqueue("bnxt_pf_wq");
16951 if (!bnxt_pf_wq) {
16952 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16953 rc = -ENOMEM;
16954 goto init_err_pci_clean;
16955 }
16956 }
16957 rc = bnxt_init_tc(bp);
16958 if (rc)
16959 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16960 rc);
16961 }
16962
16963 bnxt_inv_fw_health_reg(bp);
16964 rc = bnxt_dl_register(bp);
16965 if (rc)
16966 goto init_err_dl;
16967
16968 INIT_LIST_HEAD(&bp->usr_fltr_list);
16969
16970 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16971 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16972
16973 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
16974 if (BNXT_SUPPORTS_QUEUE_API(bp))
16975 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16976 dev->netmem_tx = true;
16977
16978 rc = register_netdev(dev);
16979 if (rc)
16980 goto init_err_cleanup;
16981
16982 bnxt_dl_fw_reporters_create(bp);
16983
16984 bnxt_rdma_aux_device_add(bp);
16985
16986 bnxt_print_device_info(bp);
16987
16988 pci_save_state(pdev);
16989
16990 return 0;
16991 init_err_cleanup:
16992 bnxt_rdma_aux_device_uninit(bp);
16993 bnxt_dl_unregister(bp);
16994 init_err_dl:
16995 bnxt_shutdown_tc(bp);
16996 bnxt_clear_int_mode(bp);
16997
16998 init_err_pci_clean:
16999 bnxt_hwrm_func_drv_unrgtr(bp);
17000 bnxt_ptp_clear(bp);
17001 kfree(bp->ptp_cfg);
17002 bp->ptp_cfg = NULL;
17003 bnxt_free_hwrm_resources(bp);
17004 bnxt_hwmon_uninit(bp);
17005 bnxt_ethtool_free(bp);
17006 kfree(bp->fw_health);
17007 bp->fw_health = NULL;
17008 bnxt_cleanup_pci(bp);
17009 bnxt_free_ctx_mem(bp, true);
17010 bnxt_free_crash_dump_mem(bp);
17011 kfree(bp->rss_indir_tbl);
17012 bp->rss_indir_tbl = NULL;
17013
17014 init_err_free:
17015 free_netdev(dev);
17016 return rc;
17017 }
17018
17019 static void bnxt_shutdown(struct pci_dev *pdev)
17020 {
17021 struct net_device *dev = pci_get_drvdata(pdev);
17022 struct bnxt *bp;
17023
17024 if (!dev)
17025 return;
17026
17027 rtnl_lock();
17028 netdev_lock(dev);
17029 bp = netdev_priv(dev);
17030 if (!bp)
17031 goto shutdown_exit;
17032
17033 if (netif_running(dev))
17034 netif_close(dev);
17035
17036 if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17037 pcie_flr(pdev);
17038 goto shutdown_exit;
17039 }
17040 bnxt_ptp_clear(bp);
17041 bnxt_clear_int_mode(bp);
17042 pci_disable_device(pdev);
17043
17044 if (system_state == SYSTEM_POWER_OFF) {
17045 pci_wake_from_d3(pdev, bp->wol);
17046 pci_set_power_state(pdev, PCI_D3hot);
17047 }
17048
17049 shutdown_exit:
17050 netdev_unlock(dev);
17051 rtnl_unlock();
17052 }
17053
17054 #ifdef CONFIG_PM_SLEEP
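/* System suspend: close the netdev, unregister the driver from
 * firmware and release context memory before the device powers down.
 */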
17055 static int bnxt_suspend(struct device *device)
17056 {
17057 struct net_device *dev = dev_get_drvdata(device);
17058 struct bnxt *bp = netdev_priv(dev);
17059 int rc = 0;
17060
17061 bnxt_ulp_stop(bp);
17062
17063 netdev_lock(dev);
17064 if (netif_running(dev)) {
17065 netif_device_detach(dev);
17066 rc = bnxt_close(dev);
17067 }
17068 bnxt_hwrm_func_drv_unrgtr(bp);
17069 bnxt_ptp_clear(bp);
17070 pci_disable_device(bp->pdev);
17071 bnxt_free_ctx_mem(bp, false);
17072 netdev_unlock(dev);
17073 return rc;
17074 }
17075
17076 static int bnxt_resume(struct device *device)
17077 {
17078 struct net_device *dev = dev_get_drvdata(device);
17079 struct bnxt *bp = netdev_priv(dev);
17080 int rc = 0;
17081
17082 netdev_lock(dev);
17083 rc = pci_enable_device(bp->pdev);
17084 if (rc) {
17085 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17086 rc);
17087 goto resume_exit;
17088 }
17089 pci_set_master(bp->pdev);
17090 if (bnxt_hwrm_ver_get(bp)) {
17091 rc = -ENODEV;
17092 goto resume_exit;
17093 }
17094 rc = bnxt_hwrm_func_reset(bp);
17095 if (rc) {
17096 rc = -EBUSY;
17097 goto resume_exit;
17098 }
17099
17100 rc = bnxt_hwrm_func_qcaps(bp);
17101 if (rc)
17102 goto resume_exit;
17103
17104 bnxt_clear_reservations(bp, true);
17105
17106 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17107 rc = -ENODEV;
17108 goto resume_exit;
17109 }
17110 if (bp->fw_crash_mem)
17111 bnxt_hwrm_crash_dump_mem_cfg(bp);
17112
17113 if (bnxt_ptp_init(bp)) {
17114 kfree(bp->ptp_cfg);
17115 bp->ptp_cfg = NULL;
17116 }
17117 bnxt_get_wol_settings(bp);
17118 if (netif_running(dev)) {
17119 rc = bnxt_open(dev);
17120 if (!rc)
17121 netif_device_attach(dev);
17122 }
17123
17124 resume_exit:
17125 netdev_unlock(bp->dev);
17126 bnxt_ulp_start(bp, rc);
17127 if (!rc)
17128 bnxt_reenable_sriov(bp);
17129 return rc;
17130 }
17131
17132 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17133 #define BNXT_PM_OPS (&bnxt_pm_ops)
17134
17135 #else
17136
17137 #define BNXT_PM_OPS NULL
17138
17139 #endif /* CONFIG_PM_SLEEP */
17140
17141 /**
17142 * bnxt_io_error_detected - called when PCI error is detected
17143 * @pdev: Pointer to PCI device
17144 * @state: The current pci connection state
17145 *
17146 * This function is called after a PCI bus error affecting
17147 * this device has been detected.
17148 */
17149 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17150 pci_channel_state_t state)
17151 {
17152 struct net_device *netdev = pci_get_drvdata(pdev);
17153 struct bnxt *bp = netdev_priv(netdev);
17154 bool abort = false;
17155
17156 netdev_info(netdev, "PCI I/O error detected\n");
17157
17158 bnxt_ulp_stop(bp);
17159
17160 netdev_lock(netdev);
17161 netif_device_detach(netdev);
17162
17163 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17164 netdev_err(bp->dev, "Firmware reset already in progress\n");
17165 abort = true;
17166 }
17167
17168 if (abort || state == pci_channel_io_perm_failure) {
17169 netdev_unlock(netdev);
17170 return PCI_ERS_RESULT_DISCONNECT;
17171 }
17172
17173 /* Link is not reliable anymore if state is pci_channel_io_frozen
17174 * so we disable bus master to prevent any potential bad DMAs before
17175 * freeing kernel memory.
17176 */
17177 if (state == pci_channel_io_frozen) {
17178 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17179 bnxt_fw_fatal_close(bp);
17180 }
17181
17182 if (netif_running(netdev))
17183 __bnxt_close_nic(bp, true, true);
17184
17185 if (pci_is_enabled(pdev))
17186 pci_disable_device(pdev);
17187 bnxt_free_ctx_mem(bp, false);
17188 netdev_unlock(netdev);
17189
17190 /* Request a slot reset. */
17191 return PCI_ERS_RESULT_NEED_RESET;
17192 }
17193
17194 /**
17195 * bnxt_io_slot_reset - called after the pci bus has been reset.
17196 * @pdev: Pointer to PCI device
17197 *
17198 * Restart the card from scratch, as if from a cold-boot.
17199 * At this point, the card has experienced a hard reset,
17200 * followed by fixups by BIOS, and has its config space
17201 * set up identically to what it was at cold boot.
17202 */
17203 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17204 {
17205 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17206 struct net_device *netdev = pci_get_drvdata(pdev);
17207 struct bnxt *bp = netdev_priv(netdev);
17208 int retry = 0;
17209 int err = 0;
17210 int off;
17211
17212 netdev_info(bp->dev, "PCI Slot Reset\n");
17213
17214 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17215 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17216 msleep(900);
17217
17218 netdev_lock(netdev);
17219
17220 if (pci_enable_device(pdev)) {
17221 dev_err(&pdev->dev,
17222 "Cannot re-enable PCI device after reset.\n");
17223 } else {
17224 pci_set_master(pdev);
17225 /* Upon a fatal error, the device's internal logic that latches
17226  * the BAR values is reset and is only restored by rewriting the
17227  * BARs.
17228  *
17229  * pci_restore_state() does not re-write a BAR whose current value
17230  * matches the saved value, so the driver writes 0 to the BARs first
17231  * to force a full restore after a fatal error.
17232  */
17233 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17234 &bp->state)) {
17235 for (off = PCI_BASE_ADDRESS_0;
17236 off <= PCI_BASE_ADDRESS_5; off += 4)
17237 pci_write_config_dword(bp->pdev, off, 0);
17238 }
17239 pci_restore_state(pdev);
17240 pci_save_state(pdev);
17241
17242 bnxt_inv_fw_health_reg(bp);
17243 bnxt_try_map_fw_health_reg(bp);
17244
17245 /* In some PCIe AER scenarios, firmware may take up to
17246 * 10 seconds to become ready in the worst case.
17247 */
17248 do {
17249 err = bnxt_try_recover_fw(bp);
17250 if (!err)
17251 break;
17252 retry++;
17253 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17254
17255 if (err) {
17256 dev_err(&pdev->dev, "Firmware not ready\n");
17257 goto reset_exit;
17258 }
17259
17260 err = bnxt_hwrm_func_reset(bp);
17261 if (!err)
17262 result = PCI_ERS_RESULT_RECOVERED;
17263
17264 /* IRQ will be initialized later in bnxt_io_resume */
17265 bnxt_ulp_irq_stop(bp);
17266 bnxt_clear_int_mode(bp);
17267 }
17268
17269 reset_exit:
17270 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17271 bnxt_clear_reservations(bp, true);
17272 netdev_unlock(netdev);
17273
17274 return result;
17275 }
17276
17277 /**
17278 * bnxt_io_resume - called when traffic can start flowing again.
17279 * @pdev: Pointer to PCI device
17280 *
17281 * This callback is called when the error recovery driver tells
17282 * us that it's OK to resume normal operation.
17283 */
17284 static void bnxt_io_resume(struct pci_dev *pdev)
17285 {
17286 struct net_device *netdev = pci_get_drvdata(pdev);
17287 struct bnxt *bp = netdev_priv(netdev);
17288 int err;
17289
17290 netdev_info(bp->dev, "PCI Slot Resume\n");
17291 netdev_lock(netdev);
17292
17293 err = bnxt_hwrm_func_qcaps(bp);
17294 if (!err) {
17295 if (netif_running(netdev)) {
17296 err = bnxt_open(netdev);
17297 } else {
17298 err = bnxt_reserve_rings(bp, true);
17299 if (!err)
17300 err = bnxt_init_int_mode(bp);
17301 }
17302 }
17303
17304 if (!err)
17305 netif_device_attach(netdev);
17306
17307 netdev_unlock(netdev);
17308 bnxt_ulp_start(bp, err);
17309 if (!err)
17310 bnxt_reenable_sriov(bp);
17311 }
17312
17313 static const struct pci_error_handlers bnxt_err_handler = {
17314 .error_detected = bnxt_io_error_detected,
17315 .slot_reset = bnxt_io_slot_reset,
17316 .resume = bnxt_io_resume
17317 };
17318
17319 static struct pci_driver bnxt_pci_driver = {
17320 .name = DRV_MODULE_NAME,
17321 .id_table = bnxt_pci_tbl,
17322 .probe = bnxt_init_one,
17323 .remove = bnxt_remove_one,
17324 .shutdown = bnxt_shutdown,
17325 .driver.pm = BNXT_PM_OPS,
17326 .err_handler = &bnxt_err_handler,
17327 #if defined(CONFIG_BNXT_SRIOV)
17328 .sriov_configure = bnxt_sriov_configure,
17329 #endif
17330 };
17331
17332 static int __init bnxt_init(void)
17333 {
17334 int err;
17335
17336 bnxt_debug_init();
17337 err = pci_register_driver(&bnxt_pci_driver);
17338 if (err) {
17339 bnxt_debug_exit();
17340 return err;
17341 }
17342
17343 return 0;
17344 }
17345
17346 static void __exit bnxt_exit(void)
17347 {
17348 pci_unregister_driver(&bnxt_pci_driver);
17349 if (bnxt_pf_wq)
17350 destroy_workqueue(bnxt_pf_wq);
17351 bnxt_debug_exit();
17352 }
17353
17354 module_init(bnxt_init);
17355 module_exit(bnxt_exit);
17356