/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>
#include <linux/bnxt/hsi.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

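/* RX buffer layout: BNXT_RX_OFFSET is where the stack sees the packet
 * (NET_SKB_PAD bytes of headroom plus NET_IP_ALIGN so the IP header lands
 * aligned), while the DMA address skips only the NET_SKB_PAD headroom.
 * Descriptive comment added for clarity; inferred from how these offsets
 * are used in the RX path below.
 */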
#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
	[NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
	{ PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
	ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
};

const u16 bnxt_bstore_to_trace[] = {
	[BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
	[BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
	[BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
	[BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
	[BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
	[BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
	[BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
	[BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
	[BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
	[BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
	[BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
	[BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
	[BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};

static struct workqueue_struct *bnxt_pf_wq;

#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}

const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
	.ports = {
		.src = 0,
		.dst = 0,
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_NONE,
			.dst = BNXT_IPV6_MASK_NONE,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_ALL,
			.dst = BNXT_IPV6_MASK_ALL,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

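/* Return true if @idx names one of the virtual-function board types, i.e.
 * the PCI function should be driven as a VF rather than a PF.
 */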
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
		idx == NETXTREME_E_P7_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

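/* Notification-queue doorbell helpers: P7 chips ring the NQ with the
 * masked doorbell type, P5+ chips use the 64-bit NQ doorbell, and older
 * chips fall back to the legacy completion-queue doorbell.
 */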
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

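/* Slow-path (sp) task scheduling.  PF work is queued on the driver's
 * private bnxt_pf_wq while VF work uses the system workqueue; the private
 * queue presumably keeps PF slow-path work from being blocked by
 * unrelated users of the system workqueue (rationale inferred, not stated
 * in the code).
 */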
static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

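/* TX length-hint lookup table, indexed by packet length in 512-byte units
 * (see the "length >>= 9" in bnxt_start_xmit()).  The hint presumably
 * lets the hardware size its DMA fetch of the packet.
 */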
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

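/* Return the CFA action for a TX skb, taken from the HW-port-mux metadata
 * dst that the representor path attaches; 0 means no action.
 */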
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

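/* Main transmit routine.  Small packets (length <= tx_push_thresh) that
 * need no special LSO/timestamp/FCS handling are written straight into
 * the push doorbell buffer; everything else takes the DMA-mapped
 * descriptor path at the normal_tx label.
 */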
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	struct pci_dev *pdev = bp->pdev;
	u16 prod, last_frag, txts_prod;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;
	skb_frag_t *frag;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
		netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
				 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
		if (skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NETDEV_TX_OK;
		}
	}
#endif
	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
	    ptp->tx_tstamp_en) {
		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
			tx_buf->is_ts_pkt = 1;
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		} else if (!skb_is_gso(skb)) {
			u16 seq_id, hdr_off;

			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
				if (vlan_tag_flags)
					hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				tx_buf->is_ts_pkt = 1;
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

				ptp->txts_req[txts_prod].tx_seqid = seq_id;
				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
				tx_buf->txts_prod = txts_prod;
			}
		}
	}
	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    skb_frags_readable(skb) && !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    TX_BD_CNT(2));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			void *fptr;

			frag = &skb_shinfo(skb)->frags[j];
			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
		prod = NEXT_TX(prod);
		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
				    DB_RING_IDX(&txr->tx_db, prod));
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		TX_BD_CNT(last_frag + 2);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
		u32 hdr_len;

		if (skb->encapsulation) {
			if (udp_gso)
				hdr_len = skb_inner_transport_offset(skb) +
					  sizeof(struct udphdr);
			else
				hdr_len = skb_inner_tcp_all_headers(skb);
		} else if (udp_gso) {
			hdr_len = skb_transport_offset(skb) +
				  sizeof(struct udphdr);
		} else {
			hdr_len = skb_tcp_all_headers(skb);
		}

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	txbd0 = txbd;
	for (i = 0; i < last_frag; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
					  mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		bnxt_txr_db_kick(bp, txr, prod);
	} else {
		if (free_size >= bp->tx_wake_thresh)
			txbd0->tx_bd_len_flags_type |=
				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
		txr->kick_pending = 1;
	}

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push) {
			txbd0->tx_bd_len_flags_type &=
				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
			bnxt_txr_db_kick(bp, txr, prod);
		}

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		frag = &skb_shinfo(skb)->frags[i];
		netmem_dma_unmap_page_attrs(&pdev->dev,
					    dma_unmap_addr(tx_buf, mapping),
					    skb_frag_size(frag),
					    DMA_TO_DEVICE, 0);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (BNXT_TX_PTP_IS_SET(lflags)) {
		txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
			/* set SKB to err so PTP worker will clean up */
			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
	}
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

/* Returns true if some remaining TX packets were not processed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	struct pci_dev *pdev = bp->pdev;
	u16 hw_cons = txr->tx_hw_cons;
	unsigned int tx_bytes = 0;
	u16 cons = txr->tx_cons;
	skb_frag_t *frag;
	int tx_pkts = 0;
	bool rc = false;

	while (RING_TX(bp, cons) != hw_cons) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		bool is_ts_pkt;
		int j, last;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
		skb = tx_buf->skb;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, cons);
			return rc;
		}

		is_ts_pkt = tx_buf->is_ts_pkt;
		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
			rc = true;
			break;
		}

		cons = NEXT_TX(cons);
		tx_pkts++;
		tx_bytes += skb->len;
		tx_buf->skb = NULL;
		tx_buf->is_ts_pkt = 0;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			frag = &skb_shinfo(skb)->frags[j];
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
			netmem_dma_unmap_page_attrs(&pdev->dev,
						    dma_unmap_addr(tx_buf,
								   mapping),
						    skb_frag_size(frag),
						    DMA_TO_DEVICE, 0);
		}
		if (unlikely(is_ts_pkt)) {
			if (BNXT_CHIP_P5(bp)) {
				/* PTP worker takes ownership of the skb */
				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
				skb = NULL;
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		napi_consume_skb(skb, budget);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);

	return rc;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr;
	bool more = false;
	int i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
			more |= __bnxt_tx_int(bp, txr, budget);
	}
	if (!more)
		bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

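/* True when RX packet headers need their own page pool: either explicitly
 * requested (need_head_pool) or when the ring's RX page size is smaller
 * than PAGE_SIZE, presumably because the main pool then hands out page
 * fragments.
 */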
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
	return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (rxr->rx_page_size < PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						rxr->rx_page_size);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	netmem_ref netmem;

	if (rxr->rx_page_size < PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
						     rxr->rx_page_size, gfp);
	} else {
		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
		*offset = 0;
	}
	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       struct bnxt_rx_ring_info *rxr,
				       gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bp->rx_buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
	return page_address(page) + offset;
}

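/* Fill the RX BD at @prod with fresh buffer memory: a page (or page
 * fragment) in page mode, otherwise a small buffer from the head pool.
 * Returns 0 on success or -ENOMEM.
 */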
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

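/* Recycle an RX buffer: move the still-mapped buffer at @cons back onto
 * the ring at the current producer slot, e.g. after a replacement
 * allocation failed or a TPA start consumed the producer buffer.
 */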
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt *bp = rxr->bnapi->bp;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
				u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;
	dma_addr_t mapping;
	netmem_ref netmem;

	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->netmem = netmem;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

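/* Return a run of aggregation buffers to the AGG ring after an error,
 * rewriting the producer descriptors to point at the recycled buffers.
 */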
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_agg_cmp *agg;
		struct rx_bd *prod_bd;
		netmem_ref netmem;
		u16 cons;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->netmem to 0 first.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;
		prod_rx_buf->netmem = netmem;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

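/* Build an skb around a full RX page in page mode: replenish the ring
 * first, then hand the page to napi_build_skb() so the page pool can
 * recycle it along with the skb.
 */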
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
				bp->rx_dir);
	if (!skb) {
		page_pool_free_va(rxr->head_pool, data, true);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u16 idx, u32 agg_bufs, bool tpa,
				 struct sk_buff *skb,
				 struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct skb_shared_info *shinfo;
	struct bnxt_rx_ring_info *rxr;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;
	u16 prod;

	rxr = bnapi->rx_ring;
	prod = rxr->rx_agg_prod;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	if (skb)
		shinfo = skb_shinfo(skb);
	else
		shinfo = xdp_get_shared_info_from_buff(xdp);

	for (i = 0; i < agg_bufs; i++) {
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct rx_agg_cmp *agg;
		u16 cons, frag_len;
		netmem_ref netmem;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		if (skb) {
			skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
					       cons_rx_buf->offset,
					       frag_len, rxr->rx_page_size);
		} else {
			skb_frag_t *frag = &shinfo->frags[i];

			skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
						  cons_rx_buf->offset,
						  frag_len);
			shinfo->nr_frags = i + 1;
		}
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_netmem() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;

		if (xdp && netmem_is_pfmemalloc(netmem))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
			if (skb) {
				skb->len -= frag_len;
				skb->data_len -= frag_len;
				skb->truesize -= rxr->rx_page_size;
			}

			--shinfo->nr_frags;
			cons_rx_buf->netmem = netmem;

			/* Update prod since possibly some netmems have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
						  rxr->rx_page_size);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
					       struct bnxt_cp_ring_info *cpr,
					       struct sk_buff *skb, u16 idx,
					       u32 agg_bufs, bool tpa)
{
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
					       skb, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
				   struct bnxt_cp_ring_info *cpr,
				   struct xdp_buff *xdp, u16 idx,
				   u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
					       NULL, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

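/* Copybreak helper: allocate a small skb and memcpy the packet out of the
 * RX buffer, syncing the buffer to the CPU and back so it can stay on the
 * ring.
 */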
static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
				      unsigned int len,
				      dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
				   bp->rx_dir);

	skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
				     unsigned int len,
				     dma_addr_t mapping)
{
	return bnxt_copy_data(bnapi, data, len, mapping);
}

static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
				     struct xdp_buff *xdp,
				     unsigned int len,
				     dma_addr_t mapping)
{
	unsigned int metasize = 0;
	u8 *data = xdp->data;
	struct sk_buff *skb;

	len = xdp->data_end - xdp->data_meta;
	metasize = xdp->data - xdp->data_meta;
	data = xdp->data_meta;

	skb = bnxt_copy_data(bnapi, data, len, mapping);
	if (!skb)
		return skb;

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

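/* Map a hardware TPA aggregation ID to a free software slot on P5+ chips.
 * Returns INVALID_HW_RING_ID when no slot is free, in which case the
 * caller schedules a ring reset.
 */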
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap)) {
		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
		if (idx >= MAX_TPA_P5)
			return INVALID_HW_RING_ID;
	}
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	tpa_info->vlan_valid = 0;
	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		tpa_info->vlan_valid = 1;
		tpa_info->metadata =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	}
}

static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->vlan_valid = 0;
	if (TPA_START_VLAN_VALID(tpa_start)) {
		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
		u32 vlan_proto = ETH_P_8021Q;

		tpa_info->vlan_valid = 1;
		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
			vlan_proto = ETH_P_8021AD;
		tpa_info->metadata = vlan_proto << 16 |
				     TPA_START_METADATA0_TCI(tpa_start1);
	}
}

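/* Handle a TPA_START completion: swap the current RX buffer into the
 * per-aggregation tpa_info, recycle the ring slots, and record the hash,
 * GSO type and VLAN metadata for when the aggregation ends.
 */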
bnxt_tpa_start(struct bnxt * bp,struct bnxt_rx_ring_info * rxr,u8 cmp_type,struct rx_tpa_start_cmp * tpa_start,struct rx_tpa_start_cmp_ext * tpa_start1)1538 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1539 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1540 struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 struct bnxt_tpa_info *tpa_info;
1544 u16 cons, prod, agg_id;
1545 struct rx_bd *prod_bd;
1546 dma_addr_t mapping;
1547
1548 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1552 netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1553 rxr->bnapi->index,
1554 TPA_START_AGG_ID_P5(tpa_start));
1555 bnxt_sched_reset_rxr(bp, rxr);
1556 return;
1557 }
1558 } else {
1559 agg_id = TPA_START_AGG_ID(tpa_start);
1560 }
1561 cons = tpa_start->rx_tpa_start_cmp_opaque;
1562 prod = rxr->rx_prod;
1563 cons_rx_buf = &rxr->rx_buf_ring[cons];
1564 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1565 tpa_info = &rxr->rx_tpa[agg_id];
1566
1567 if (unlikely(cons != rxr->rx_next_cons ||
1568 TPA_START_ERROR(tpa_start))) {
1569 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1570 cons, rxr->rx_next_cons,
1571 TPA_START_ERROR_CODE(tpa_start1));
1572 bnxt_sched_reset_rxr(bp, rxr);
1573 return;
1574 }
1575 prod_rx_buf->data = tpa_info->data;
1576 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1577
1578 mapping = tpa_info->mapping;
1579 prod_rx_buf->mapping = mapping;
1580
1581 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1582
1583 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1584
1585 tpa_info->data = cons_rx_buf->data;
1586 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1587 cons_rx_buf->data = NULL;
1588 tpa_info->mapping = cons_rx_buf->mapping;
1589
1590 tpa_info->len =
1591 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1592 RX_TPA_START_CMP_LEN_SHIFT;
1593 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1594 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1595 tpa_info->gso_type = SKB_GSO_TCPV4;
1596 if (TPA_START_IS_IPV6(tpa_start1))
1597 tpa_info->gso_type = SKB_GSO_TCPV6;
1598 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1599 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1600 TPA_START_HASH_TYPE(tpa_start) == 3)
1601 tpa_info->gso_type = SKB_GSO_TCPV6;
1602 tpa_info->rss_hash =
1603 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1604 } else {
1605 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1606 tpa_info->gso_type = 0;
1607 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1608 }
1609 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1610 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1611 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1612 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1613 else
1614 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1615 tpa_info->agg_count = 0;
1616
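/* Advance the ring state past both buffers touched by this TPA_START:
 * the one now owned by tpa_info, and the following one, which is
 * recycled to the producer ring below via bnxt_reuse_rx_data().
 */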
1617 rxr->rx_prod = NEXT_RX(prod);
1618 cons = RING_RX(bp, NEXT_RX(cons));
1619 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1620 cons_rx_buf = &rxr->rx_buf_ring[cons];
1621
1622 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1623 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1624 cons_rx_buf->data = NULL;
1625 }
1626
1627 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1628 {
1629 if (agg_bufs)
1630 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1631 }
1632
1633 #ifdef CONFIG_INET
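/* Set the UDP tunnel GSO type on the aggregated skb based on whether
 * the outer UDP header carries a checksum.
 */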
1634 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1635 {
1636 struct udphdr *uh = NULL;
1637
1638 if (ip_proto == htons(ETH_P_IP)) {
1639 struct iphdr *iph = (struct iphdr *)skb->data;
1640
1641 if (iph->protocol == IPPROTO_UDP)
1642 uh = (struct udphdr *)(iph + 1);
1643 } else {
1644 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1645
1646 if (iph->nexthdr == IPPROTO_UDP)
1647 uh = (struct udphdr *)(iph + 1);
1648 }
1649 if (uh) {
1650 if (uh->check)
1651 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1652 else
1653 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1654 }
1655 }
1656 #endif
1657
1658 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1659 int payload_off, int tcp_ts,
1660 struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 struct tcphdr *th;
1664 int len, nw_off;
1665 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1666 u32 hdr_info = tpa_info->hdr_info;
1667 bool loopback = false;
1668
1669 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1670 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1671 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1672
1673 /* If the packet is an internal loopback packet, the offsets will
1674 * have an extra 4 bytes.
1675 */
1676 if (inner_mac_off == 4) {
1677 loopback = true;
1678 } else if (inner_mac_off > 4) {
1679 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1680 ETH_HLEN - 2));
1681
1682 /* We only support inner IPv4/IPv6. If we don't see the
1683 * correct protocol ID, it must be a loopback packet where
1684 * the offsets are off by 4.
1685 */
1686 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1687 loopback = true;
1688 }
1689 if (loopback) {
1690 /* internal loopback packet, subtract 4 from all offsets */
1691 inner_ip_off -= 4;
1692 inner_mac_off -= 4;
1693 outer_ip_off -= 4;
1694 }
1695
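/* The hardware offsets are measured from the start of the outer MAC
 * header, but skb->data already points past it after eth_type_trans()
 * in bnxt_tpa_end(), hence the ETH_HLEN adjustments here and above.
 */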
1696 nw_off = inner_ip_off - ETH_HLEN;
1697 skb_set_network_header(skb, nw_off);
1698 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1699 struct ipv6hdr *iph = ipv6_hdr(skb);
1700
1701 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1702 len = skb->len - skb_transport_offset(skb);
1703 th = tcp_hdr(skb);
1704 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1705 } else {
1706 struct iphdr *iph = ip_hdr(skb);
1707
1708 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1709 len = skb->len - skb_transport_offset(skb);
1710 th = tcp_hdr(skb);
1711 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1712 }
1713
1714 if (inner_mac_off) { /* tunnel */
1715 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1716 ETH_HLEN - 2));
1717
1718 bnxt_gro_tunnel(skb, proto);
1719 }
1720 #endif
1721 return skb;
1722 }
1723
1724 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1725 int payload_off, int tcp_ts,
1726 struct sk_buff *skb)
1727 {
1728 #ifdef CONFIG_INET
1729 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1730 u32 hdr_info = tpa_info->hdr_info;
1731 int iphdr_len, nw_off;
1732
1733 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1734 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1735 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1736
1737 nw_off = inner_ip_off - ETH_HLEN;
1738 skb_set_network_header(skb, nw_off);
1739 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1740 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1741 skb_set_transport_header(skb, nw_off + iphdr_len);
1742
1743 if (inner_mac_off) { /* tunnel */
1744 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1745 ETH_HLEN - 2));
1746
1747 bnxt_gro_tunnel(skb, proto);
1748 }
1749 #endif
1750 return skb;
1751 }
1752
1753 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1754 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1755
1756 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1757 int payload_off, int tcp_ts,
1758 struct sk_buff *skb)
1759 {
1760 #ifdef CONFIG_INET
1761 struct tcphdr *th;
1762 int len, nw_off, tcp_opt_len = 0;
1763
1764 if (tcp_ts)
1765 tcp_opt_len = 12;
1766
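/* payload_off is the TCP payload offset from the start of the packet.
 * For example, an untunneled IPv4 flow with TCP timestamps has
 * payload_off = 14 + 20 + 20 + 12 = 66, so nw_off below works out to
 * 66 - 40 - 12 - 14 = 0, i.e. no tunnel header.
 */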
1767 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1768 struct iphdr *iph;
1769
1770 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1771 ETH_HLEN;
1772 skb_set_network_header(skb, nw_off);
1773 iph = ip_hdr(skb);
1774 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1775 len = skb->len - skb_transport_offset(skb);
1776 th = tcp_hdr(skb);
1777 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1778 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1779 struct ipv6hdr *iph;
1780
1781 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1782 ETH_HLEN;
1783 skb_set_network_header(skb, nw_off);
1784 iph = ipv6_hdr(skb);
1785 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1786 len = skb->len - skb_transport_offset(skb);
1787 th = tcp_hdr(skb);
1788 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1789 } else {
1790 dev_kfree_skb_any(skb);
1791 return NULL;
1792 }
1793
1794 if (nw_off) /* tunnel */
1795 bnxt_gro_tunnel(skb, skb->protocol);
1796 #endif
1797 return skb;
1798 }
1799
1800 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1801 struct bnxt_tpa_info *tpa_info,
1802 struct rx_tpa_end_cmp *tpa_end,
1803 struct rx_tpa_end_cmp_ext *tpa_end1,
1804 struct sk_buff *skb,
1805 struct bnxt_rx_sw_stats *rx_stats)
1806 {
1807 #ifdef CONFIG_INET
1808 int payload_off;
1809 u16 segs;
1810
1811 segs = TPA_END_TPA_SEGS(tpa_end);
1812 if (segs == 1)
1813 return skb;
1814
1815 rx_stats->rx_hw_gro_packets++;
1816 rx_stats->rx_hw_gro_wire_packets += segs;
1817
1818 NAPI_GRO_CB(skb)->count = segs;
1819 skb_shinfo(skb)->gso_size =
1820 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1821 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1822 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1823 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1824 else
1825 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1826 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1827 if (likely(skb))
1828 tcp_gro_complete(skb);
1829 #endif
1830 return skb;
1831 }
1832
1833 /* Given the cfa_code of a received packet, determine which
1834 * netdev (vf-rep or PF) the packet is destined to.
1835 */
1836 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1837 {
1838 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1839
1840 /* if vf-rep dev is NULL, it must belong to the PF */
1841 return dev ? dev : bp->dev;
1842 }
1843
1844 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1845 struct bnxt_cp_ring_info *cpr,
1846 u32 *raw_cons,
1847 struct rx_tpa_end_cmp *tpa_end,
1848 struct rx_tpa_end_cmp_ext *tpa_end1,
1849 u8 *event)
1850 {
1851 struct bnxt_napi *bnapi = cpr->bnapi;
1852 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1853 struct net_device *dev = bp->dev;
1854 u8 *data_ptr, agg_bufs;
1855 unsigned int len;
1856 struct bnxt_tpa_info *tpa_info;
1857 dma_addr_t mapping;
1858 struct sk_buff *skb;
1859 u16 idx = 0, agg_id;
1860 void *data;
1861 bool gro;
1862
1863 if (unlikely(bnapi->in_reset)) {
1864 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1865
1866 if (rc < 0)
1867 return ERR_PTR(-EBUSY);
1868 return NULL;
1869 }
1870
1871 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1872 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1873 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1874 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1875 tpa_info = &rxr->rx_tpa[agg_id];
1876 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1877 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1878 agg_bufs, tpa_info->agg_count);
1879 agg_bufs = tpa_info->agg_count;
1880 }
1881 tpa_info->agg_count = 0;
1882 *event |= BNXT_AGG_EVENT;
1883 bnxt_free_agg_idx(rxr, agg_id);
1884 idx = agg_id;
1885 gro = !!(bp->flags & BNXT_FLAG_GRO);
1886 } else {
1887 agg_id = TPA_END_AGG_ID(tpa_end);
1888 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1889 tpa_info = &rxr->rx_tpa[agg_id];
1890 idx = RING_CMP(*raw_cons);
1891 if (agg_bufs) {
1892 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1893 return ERR_PTR(-EBUSY);
1894
1895 *event |= BNXT_AGG_EVENT;
1896 idx = NEXT_CMP(idx);
1897 }
1898 gro = !!TPA_END_GRO(tpa_end);
1899 }
1900 data = tpa_info->data;
1901 data_ptr = tpa_info->data_ptr;
1902 prefetch(data_ptr);
1903 len = tpa_info->len;
1904 mapping = tpa_info->mapping;
1905
1906 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1907 bnxt_abort_tpa(cpr, idx, agg_bufs);
1908 if (agg_bufs > MAX_SKB_FRAGS)
1909 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1910 agg_bufs, (int)MAX_SKB_FRAGS);
1911 return NULL;
1912 }
1913
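/* Small aggregations are copied into a fresh skb so the TPA buffer can
 * stay in place; larger ones take ownership of the buffer and a
 * replacement fragment is allocated for the ring.
 */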
1914 if (len <= bp->rx_copybreak) {
1915 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1916 if (!skb) {
1917 bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 cpr->sw_stats->rx.rx_oom_discards += 1;
1919 return NULL;
1920 }
1921 } else {
1922 u8 *new_data;
1923 dma_addr_t new_mapping;
1924
1925 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1926 GFP_ATOMIC);
1927 if (!new_data) {
1928 bnxt_abort_tpa(cpr, idx, agg_bufs);
1929 cpr->sw_stats->rx.rx_oom_discards += 1;
1930 return NULL;
1931 }
1932
1933 tpa_info->data = new_data;
1934 tpa_info->data_ptr = new_data + bp->rx_offset;
1935 tpa_info->mapping = new_mapping;
1936
1937 skb = napi_build_skb(data, bp->rx_buf_size);
1938 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1939 bp->rx_buf_use_size, bp->rx_dir);
1940
1941 if (!skb) {
1942 page_pool_free_va(rxr->head_pool, data, true);
1943 bnxt_abort_tpa(cpr, idx, agg_bufs);
1944 cpr->sw_stats->rx.rx_oom_discards += 1;
1945 return NULL;
1946 }
1947 skb_mark_for_recycle(skb);
1948 skb_reserve(skb, bp->rx_offset);
1949 skb_put(skb, len);
1950 }
1951
1952 if (agg_bufs) {
1953 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1954 true);
1955 if (!skb) {
1956 /* Page reuse already handled by bnxt_rx_agg_netmems_skb(). */
1957 cpr->sw_stats->rx.rx_oom_discards += 1;
1958 return NULL;
1959 }
1960 }
1961
1962 if (tpa_info->cfa_code_valid)
1963 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1964 skb->protocol = eth_type_trans(skb, dev);
1965
1966 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1967 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1968
1969 if (tpa_info->vlan_valid &&
1970 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1971 __be16 vlan_proto = htons(tpa_info->metadata >>
1972 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1973 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1974
1975 if (eth_type_vlan(vlan_proto)) {
1976 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1977 } else {
1978 dev_kfree_skb(skb);
1979 return NULL;
1980 }
1981 }
1982
1983 skb_checksum_none_assert(skb);
1984 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1985 skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 skb->csum_level =
1987 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1988 }
1989
1990 if (gro)
1991 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
1992 &cpr->sw_stats->rx);
1993
1994 return skb;
1995 }
1996
1997 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1998 struct rx_agg_cmp *rx_agg)
1999 {
2000 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
2001 struct bnxt_tpa_info *tpa_info;
2002
2003 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2004 tpa_info = &rxr->rx_tpa[agg_id];
2005 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2006 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2007 }
2008
2009 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2010 struct sk_buff *skb)
2011 {
2012 skb_mark_for_recycle(skb);
2013
2014 if (skb->dev != bp->dev) {
2015 /* this packet belongs to a vf-rep */
2016 bnxt_vf_rep_rx(bp, skb);
2017 return;
2018 }
2019 skb_record_rx_queue(skb, bnapi->index);
2020 napi_gro_receive(&bnapi->napi, skb);
2021 }
2022
2023 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2024 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2025 {
2026 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2027
2028 if (BNXT_PTP_RX_TS_VALID(flags))
2029 goto ts_valid;
2030 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2031 return false;
2032
2033 ts_valid:
2034 *cmpl_ts = ts;
2035 return true;
2036 }
2037
2038 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2039 struct rx_cmp *rxcmp,
2040 struct rx_cmp_ext *rxcmp1)
2041 {
2042 __be16 vlan_proto;
2043 u16 vtag;
2044
2045 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2046 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2047 u32 meta_data;
2048
2049 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2050 return skb;
2051
2052 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2053 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2054 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2055 if (eth_type_vlan(vlan_proto))
2056 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2057 else
2058 goto vlan_err;
2059 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2060 if (RX_CMP_VLAN_VALID(rxcmp)) {
2061 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2062
2063 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2064 vlan_proto = htons(ETH_P_8021Q);
2065 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2066 vlan_proto = htons(ETH_P_8021AD);
2067 else
2068 goto vlan_err;
2069 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2070 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2071 }
2072 }
2073 return skb;
2074 vlan_err:
2075 skb_mark_for_recycle(skb);
2076 dev_kfree_skb(skb);
2077 return NULL;
2078 }
2079
2080 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2081 struct rx_cmp *rxcmp)
2082 {
2083 u8 ext_op;
2084
2085 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2086 switch (ext_op) {
2087 case EXT_OP_INNER_4:
2088 case EXT_OP_OUTER_4:
2089 case EXT_OP_INNFL_3:
2090 case EXT_OP_OUTFL_3:
2091 return PKT_HASH_TYPE_L4;
2092 default:
2093 return PKT_HASH_TYPE_L3;
2094 }
2095 }
2096
2097 /* returns the following:
2098 * 1 - 1 packet successfully received
2099 * 0 - successful TPA_START, packet not completed yet
2100 * -EBUSY - completion ring does not have all the agg buffers yet
2101 * -ENOMEM - packet aborted due to out of memory
2102 * -EIO - packet aborted due to hw error indicated in BD
2103 */
2104 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2105 u32 *raw_cons, u8 *event)
2106 {
2107 struct bnxt_napi *bnapi = cpr->bnapi;
2108 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2109 struct net_device *dev = bp->dev;
2110 struct rx_cmp *rxcmp;
2111 struct rx_cmp_ext *rxcmp1;
2112 u32 tmp_raw_cons = *raw_cons;
2113 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2114 struct skb_shared_info *sinfo;
2115 struct bnxt_sw_rx_bd *rx_buf;
2116 unsigned int len;
2117 u8 *data_ptr, agg_bufs, cmp_type;
2118 bool xdp_active = false;
2119 dma_addr_t dma_addr;
2120 struct sk_buff *skb;
2121 struct xdp_buff xdp;
2122 u32 flags, misc;
2123 u32 cmpl_ts;
2124 void *data;
2125 int rc = 0;
2126
2127 rxcmp = (struct rx_cmp *)
2128 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2129
2130 cmp_type = RX_CMP_TYPE(rxcmp);
2131
2132 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2133 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2134 goto next_rx_no_prod_no_len;
2135 }
2136
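/* L2 RX completions span two completion ring entries; the second half
 * (rxcmp1) must be valid before either entry can be consumed.
 */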
2137 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2138 cp_cons = RING_CMP(tmp_raw_cons);
2139 rxcmp1 = (struct rx_cmp_ext *)
2140 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2141
2142 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2143 return -EBUSY;
2144
2145 /* The valid test of the entry must be done first before
2146 * reading any further.
2147 */
2148 dma_rmb();
2149 prod = rxr->rx_prod;
2150
2151 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2152 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2153 bnxt_tpa_start(bp, rxr, cmp_type,
2154 (struct rx_tpa_start_cmp *)rxcmp,
2155 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2156
2157 *event |= BNXT_RX_EVENT;
2158 goto next_rx_no_prod_no_len;
2159
2160 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2161 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2162 (struct rx_tpa_end_cmp *)rxcmp,
2163 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2164
2165 if (IS_ERR(skb))
2166 return -EBUSY;
2167
2168 rc = -ENOMEM;
2169 if (likely(skb)) {
2170 bnxt_deliver_skb(bp, bnapi, skb);
2171 rc = 1;
2172 }
2173 *event |= BNXT_RX_EVENT;
2174 goto next_rx_no_prod_no_len;
2175 }
2176
2177 cons = rxcmp->rx_cmp_opaque;
2178 if (unlikely(cons != rxr->rx_next_cons)) {
2179 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2180
2181 /* 0xffff is a forced error, don't print it */
2182 if (rxr->rx_next_cons != 0xffff)
2183 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2184 cons, rxr->rx_next_cons);
2185 bnxt_sched_reset_rxr(bp, rxr);
2186 if (rc1)
2187 return rc1;
2188 goto next_rx_no_prod_no_len;
2189 }
2190 rx_buf = &rxr->rx_buf_ring[cons];
2191 data = rx_buf->data;
2192 data_ptr = rx_buf->data_ptr;
2193 prefetch(data_ptr);
2194
2195 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2196 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2197
2198 if (agg_bufs) {
2199 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2200 return -EBUSY;
2201
2202 cp_cons = NEXT_CMP(cp_cons);
2203 *event |= BNXT_AGG_EVENT;
2204 }
2205 *event |= BNXT_RX_EVENT;
2206
2207 rx_buf->data = NULL;
2208 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2209 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2210
2211 bnxt_reuse_rx_data(rxr, cons, data);
2212 if (agg_bufs)
2213 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2214 false);
2215
2216 rc = -EIO;
2217 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2218 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2219 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2220 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2221 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2222 rx_err);
2223 bnxt_sched_reset_rxr(bp, rxr);
2224 }
2225 }
2226 goto next_rx_no_len;
2227 }
2228
2229 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2230 len = flags >> RX_CMP_LEN_SHIFT;
2231 dma_addr = rx_buf->mapping;
2232
2233 if (bnxt_xdp_attached(bp, rxr)) {
2234 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2235 if (agg_bufs) {
2236 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2237 cp_cons,
2238 agg_bufs,
2239 false);
2240 if (!frag_len)
2241 goto oom_next_rx;
2242
2243 }
2244 xdp_active = true;
2245 }
2246
2247 if (xdp_active) {
2248 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2249 rc = 1;
2250 goto next_rx;
2251 }
2252 if (xdp_buff_has_frags(&xdp)) {
2253 sinfo = xdp_get_shared_info_from_buff(&xdp);
2254 agg_bufs = sinfo->nr_frags;
2255 } else {
2256 agg_bufs = 0;
2257 }
2258 }
2259
2260 if (len <= bp->rx_copybreak) {
2261 if (!xdp_active)
2262 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2263 else
2264 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2265 bnxt_reuse_rx_data(rxr, cons, data);
2266 if (!skb) {
2267 if (agg_bufs) {
2268 if (!xdp_active)
2269 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2270 agg_bufs, false);
2271 else
2272 bnxt_xdp_buff_frags_free(rxr, &xdp);
2273 }
2274 goto oom_next_rx;
2275 }
2276 } else {
2277 u32 payload;
2278
2279 if (rx_buf->data_ptr == data_ptr)
2280 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2281 else
2282 payload = 0;
2283 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2284 payload | len);
2285 if (!skb)
2286 goto oom_next_rx;
2287 }
2288
2289 if (agg_bufs) {
2290 if (!xdp_active) {
2291 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2292 agg_bufs, false);
2293 if (!skb)
2294 goto oom_next_rx;
2295 } else {
2296 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
2297 if (!skb) {
2298 /* we should be able to free the old skb here */
2299 bnxt_xdp_buff_frags_free(rxr, &xdp);
2300 goto oom_next_rx;
2301 }
2302 }
2303 }
2304
2305 if (RX_CMP_HASH_VALID(rxcmp)) {
2306 enum pkt_hash_types type;
2307
2308 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2309 type = bnxt_rss_ext_op(bp, rxcmp);
2310 } else {
2311 u32 itypes = RX_CMP_ITYPES(rxcmp);
2312
2313 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2314 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2315 type = PKT_HASH_TYPE_L4;
2316 else
2317 type = PKT_HASH_TYPE_L3;
2318 }
2319 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2320 }
2321
2322 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2323 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2324 skb->protocol = eth_type_trans(skb, dev);
2325
2326 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2327 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2328 if (!skb)
2329 goto next_rx;
2330 }
2331
2332 skb_checksum_none_assert(skb);
2333 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2334 if (dev->features & NETIF_F_RXCSUM) {
2335 skb->ip_summed = CHECKSUM_UNNECESSARY;
2336 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2337 }
2338 } else {
2339 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2340 if (dev->features & NETIF_F_RXCSUM)
2341 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2342 }
2343 }
2344
2345 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2346 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2347 u64 ns, ts;
2348
2349 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2350 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2351
2352 ns = bnxt_timecounter_cyc2time(ptp, ts);
2353 memset(skb_hwtstamps(skb), 0,
2354 sizeof(*skb_hwtstamps(skb)));
2355 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2356 }
2357 }
2358 }
2359 bnxt_deliver_skb(bp, bnapi, skb);
2360 rc = 1;
2361
2362 next_rx:
2363 cpr->rx_packets += 1;
2364 cpr->rx_bytes += len;
2365
2366 next_rx_no_len:
2367 rxr->rx_prod = NEXT_RX(prod);
2368 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2369
2370 next_rx_no_prod_no_len:
2371 *raw_cons = tmp_raw_cons;
2372
2373 return rc;
2374
2375 oom_next_rx:
2376 cpr->sw_stats->rx.rx_oom_discards += 1;
2377 rc = -ENOMEM;
2378 goto next_rx;
2379 }
2380
2381 /* In netpoll mode, if we are using a combined completion ring, we need to
2382 * discard the rx packets and recycle the buffers.
2383 */
2384 static int bnxt_force_rx_discard(struct bnxt *bp,
2385 struct bnxt_cp_ring_info *cpr,
2386 u32 *raw_cons, u8 *event)
2387 {
2388 u32 tmp_raw_cons = *raw_cons;
2389 struct rx_cmp_ext *rxcmp1;
2390 struct rx_cmp *rxcmp;
2391 u16 cp_cons;
2392 u8 cmp_type;
2393 int rc;
2394
2395 cp_cons = RING_CMP(tmp_raw_cons);
2396 rxcmp = (struct rx_cmp *)
2397 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2398
2399 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2400 cp_cons = RING_CMP(tmp_raw_cons);
2401 rxcmp1 = (struct rx_cmp_ext *)
2402 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2403
2404 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2405 return -EBUSY;
2406
2407 /* The valid test of the entry must be done first before
2408 * reading any further.
2409 */
2410 dma_rmb();
2411 cmp_type = RX_CMP_TYPE(rxcmp);
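/* Force an error bit into the completion so that bnxt_rx_pkt() below
 * drops the packet and recycles its buffers.
 */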
2412 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2413 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2414 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2415 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2416 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2417 struct rx_tpa_end_cmp_ext *tpa_end1;
2418
2419 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2420 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2421 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2422 }
2423 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2424 if (rc && rc != -EBUSY)
2425 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2426 return rc;
2427 }
2428
2429 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2430 {
2431 struct bnxt_fw_health *fw_health = bp->fw_health;
2432 u32 reg = fw_health->regs[reg_idx];
2433 u32 reg_type, reg_off, val = 0;
2434
2435 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2436 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2437 switch (reg_type) {
2438 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2439 pci_read_config_dword(bp->pdev, reg_off, &val);
2440 break;
2441 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2442 reg_off = fw_health->mapped_regs[reg_idx];
2443 fallthrough;
2444 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2445 val = readl(bp->bar0 + reg_off);
2446 break;
2447 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2448 val = readl(bp->bar1 + reg_off);
2449 break;
2450 }
2451 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2452 val &= fw_health->fw_reset_inprog_reg_mask;
2453 return val;
2454 }
2455
2456 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2457 {
2458 int i;
2459
2460 for (i = 0; i < bp->rx_nr_rings; i++) {
2461 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2462 struct bnxt_ring_grp_info *grp_info;
2463
2464 grp_info = &bp->grp_info[grp_idx];
2465 if (grp_info->agg_fw_ring_id == ring_id)
2466 return grp_idx;
2467 }
2468 return INVALID_HW_RING_ID;
2469 }
2470
2471 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2472 {
2473 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2474
2475 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2476 return link_info->force_link_speed2;
2477 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2478 return link_info->force_pam4_link_speed;
2479 return link_info->force_link_speed;
2480 }
2481
2482 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2483 {
2484 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2485
2486 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2487 link_info->req_link_speed = link_info->force_link_speed2;
2488 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2489 switch (link_info->req_link_speed) {
2490 case BNXT_LINK_SPEED_50GB_PAM4:
2491 case BNXT_LINK_SPEED_100GB_PAM4:
2492 case BNXT_LINK_SPEED_200GB_PAM4:
2493 case BNXT_LINK_SPEED_400GB_PAM4:
2494 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2495 break;
2496 case BNXT_LINK_SPEED_100GB_PAM4_112:
2497 case BNXT_LINK_SPEED_200GB_PAM4_112:
2498 case BNXT_LINK_SPEED_400GB_PAM4_112:
2499 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2500 break;
2501 default:
2502 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2503 }
2504 return;
2505 }
2506 link_info->req_link_speed = link_info->force_link_speed;
2507 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2508 if (link_info->force_pam4_link_speed) {
2509 link_info->req_link_speed = link_info->force_pam4_link_speed;
2510 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2511 }
2512 }
2513
2514 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2515 {
2516 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2517
2518 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2519 link_info->advertising = link_info->auto_link_speeds2;
2520 return;
2521 }
2522 link_info->advertising = link_info->auto_link_speeds;
2523 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2524 }
2525
2526 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2527 {
2528 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2529
2530 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2531 if (link_info->req_link_speed != link_info->force_link_speed2)
2532 return true;
2533 return false;
2534 }
2535 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2536 link_info->req_link_speed != link_info->force_link_speed)
2537 return true;
2538 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2539 link_info->req_link_speed != link_info->force_pam4_link_speed)
2540 return true;
2541 return false;
2542 }
2543
2544 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2545 {
2546 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2547
2548 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2549 if (link_info->advertising != link_info->auto_link_speeds2)
2550 return true;
2551 return false;
2552 }
2553 if (link_info->advertising != link_info->auto_link_speeds ||
2554 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2555 return true;
2556 return false;
2557 }
2558
2559 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2560 {
2561 u32 flags = bp->ctx->ctx_arr[type].flags;
2562
2563 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2564 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2565 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2566 }
2567
2568 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2569 {
2570 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2571 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2572 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2573 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2574 struct bnxt_bs_trace_info *bs_trace;
2575 int last_pg;
2576
2577 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2578 return;
2579
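/* Locate the last byte of the trace buffer and seed it with
 * BNXT_TRACE_BUF_MAGIC_BYTE; bnxt_bs_trace_check_wrap() can then treat
 * an overwritten magic byte as a sign that the firmware producer has
 * wrapped around the buffer.
 */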
2580 mem_size = ctxm->max_entries * ctxm->entry_size;
2581 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2582 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2583
2584 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2585 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2586
2587 rmem = &ctx_pg[0].ring_mem;
2588 bs_trace = &bp->bs_trace[trace_type];
2589 bs_trace->ctx_type = ctxm->type;
2590 bs_trace->trace_type = trace_type;
2591 if (pages > MAX_CTX_PAGES) {
2592 int last_pg_dir = rmem->nr_pages - 1;
2593
2594 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2595 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2596 } else {
2597 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2598 }
2599 bs_trace->magic_byte += magic_byte_offset;
2600 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2601 }
2602
2603 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2604 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2605 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2606
2607 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2608 (((data2) & \
2609 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2610 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2611
2612 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2613 ((data2) & \
2614 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2615
2616 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2617 (((data2) & \
2618 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2619 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2620
2621 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2622 ((data1) & \
2623 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2624
2625 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2626 (((data1) & \
2627 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2628 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2629
2630 /* Return true if the workqueue has to be scheduled */
2631 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2632 {
2633 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2634
2635 switch (err_type) {
2636 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2637 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2638 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2639 break;
2640 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2641 netdev_warn(bp->dev, "Pause Storm detected!\n");
2642 break;
2643 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2644 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2645 break;
2646 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2647 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2648 char *threshold_type;
2649 bool notify = false;
2650 char *dir_str;
2651
2652 switch (type) {
2653 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2654 threshold_type = "warning";
2655 break;
2656 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2657 threshold_type = "critical";
2658 break;
2659 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2660 threshold_type = "fatal";
2661 break;
2662 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2663 threshold_type = "shutdown";
2664 break;
2665 default:
2666 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2667 return false;
2668 }
2669 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2670 dir_str = "above";
2671 notify = true;
2672 } else {
2673 dir_str = "below";
2674 }
2675 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2676 dir_str, threshold_type);
2677 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2678 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2679 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2680 if (notify) {
2681 bp->thermal_threshold_type = type;
2682 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2683 return true;
2684 }
2685 return false;
2686 }
2687 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2688 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2689 break;
2690 default:
2691 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2692 err_type);
2693 break;
2694 }
2695 return false;
2696 }
2697
2698 #define BNXT_GET_EVENT_PORT(data) \
2699 ((data) & \
2700 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2701
2702 #define BNXT_EVENT_RING_TYPE(data2) \
2703 ((data2) & \
2704 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2705
2706 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2707 (BNXT_EVENT_RING_TYPE(data2) == \
2708 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2709
2710 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2711 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2712 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2713
2714 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2715 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2716 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2717
2718 #define BNXT_PHC_BITS 48
2719
2720 static int bnxt_async_event_process(struct bnxt *bp,
2721 struct hwrm_async_event_cmpl *cmpl)
2722 {
2723 u16 event_id = le16_to_cpu(cmpl->event_id);
2724 u32 data1 = le32_to_cpu(cmpl->event_data1);
2725 u32 data2 = le32_to_cpu(cmpl->event_data2);
2726
2727 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2728 event_id, data1, data2);
2729
2730 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2731 switch (event_id) {
2732 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2733 struct bnxt_link_info *link_info = &bp->link_info;
2734
2735 if (BNXT_VF(bp))
2736 goto async_event_process_exit;
2737
2738 /* print unsupported speed warning in forced speed mode only */
2739 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2740 (data1 & 0x20000)) {
2741 u16 fw_speed = bnxt_get_force_speed(link_info);
2742 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2743
2744 if (speed != SPEED_UNKNOWN)
2745 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2746 speed);
2747 }
2748 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2749 }
2750 fallthrough;
2751 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2752 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2753 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2754 fallthrough;
2755 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2756 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2757 break;
2758 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2759 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2760 break;
2761 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2762 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2763
2764 if (BNXT_VF(bp))
2765 break;
2766
2767 if (bp->pf.port_id != port_id)
2768 break;
2769
2770 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2771 break;
2772 }
2773 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2774 if (BNXT_PF(bp))
2775 goto async_event_process_exit;
2776 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2777 break;
2778 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2779 char *type_str = "Solicited";
2780
2781 if (!bp->fw_health)
2782 goto async_event_process_exit;
2783
2784 bp->fw_reset_timestamp = jiffies;
2785 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2786 if (!bp->fw_reset_min_dsecs)
2787 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2788 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2789 if (!bp->fw_reset_max_dsecs)
2790 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2791 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2792 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2793 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2794 type_str = "Fatal";
2795 bp->fw_health->fatalities++;
2796 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2797 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2798 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2799 type_str = "Non-fatal";
2800 bp->fw_health->survivals++;
2801 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2802 }
2803 netif_warn(bp, hw, bp->dev,
2804 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2805 type_str, data1, data2,
2806 bp->fw_reset_min_dsecs * 100,
2807 bp->fw_reset_max_dsecs * 100);
2808 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2809 break;
2810 }
2811 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2812 struct bnxt_fw_health *fw_health = bp->fw_health;
2813 char *status_desc = "healthy";
2814 u32 status;
2815
2816 if (!fw_health)
2817 goto async_event_process_exit;
2818
2819 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2820 fw_health->enabled = false;
2821 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2822 break;
2823 }
2824 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2825 fw_health->tmr_multiplier =
2826 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2827 bp->current_interval * 10);
2828 fw_health->tmr_counter = fw_health->tmr_multiplier;
2829 if (!fw_health->enabled)
2830 fw_health->last_fw_heartbeat =
2831 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2832 fw_health->last_fw_reset_cnt =
2833 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2834 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2835 if (status != BNXT_FW_STATUS_HEALTHY)
2836 status_desc = "unhealthy";
2837 netif_info(bp, drv, bp->dev,
2838 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2839 fw_health->primary ? "primary" : "backup", status,
2840 status_desc, fw_health->last_fw_reset_cnt);
2841 if (!fw_health->enabled) {
2842 /* Make sure tmr_counter is set and visible to
2843 * bnxt_health_check() before setting enabled to true.
2844 */
2845 smp_wmb();
2846 fw_health->enabled = true;
2847 }
2848 goto async_event_process_exit;
2849 }
2850 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2851 netif_notice(bp, hw, bp->dev,
2852 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2853 data1, data2);
2854 goto async_event_process_exit;
2855 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2856 struct bnxt_rx_ring_info *rxr;
2857 u16 grp_idx;
2858
2859 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2860 goto async_event_process_exit;
2861
2862 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2863 BNXT_EVENT_RING_TYPE(data2), data1);
2864 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2865 goto async_event_process_exit;
2866
2867 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2868 if (grp_idx == INVALID_HW_RING_ID) {
2869 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2870 data1);
2871 goto async_event_process_exit;
2872 }
2873 rxr = bp->bnapi[grp_idx]->rx_ring;
2874 bnxt_sched_reset_rxr(bp, rxr);
2875 goto async_event_process_exit;
2876 }
2877 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2878 struct bnxt_fw_health *fw_health = bp->fw_health;
2879
2880 netif_notice(bp, hw, bp->dev,
2881 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2882 data1, data2);
2883 if (fw_health) {
2884 fw_health->echo_req_data1 = data1;
2885 fw_health->echo_req_data2 = data2;
2886 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2887 break;
2888 }
2889 goto async_event_process_exit;
2890 }
2891 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2892 bnxt_ptp_pps_event(bp, data1, data2);
2893 goto async_event_process_exit;
2894 }
2895 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2896 if (bnxt_event_error_report(bp, data1, data2))
2897 break;
2898 goto async_event_process_exit;
2899 }
2900 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2901 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2902 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2903 if (BNXT_PTP_USE_RTC(bp)) {
2904 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2905 unsigned long flags;
2906 u64 ns;
2907
2908 if (!ptp)
2909 goto async_event_process_exit;
2910
2911 bnxt_ptp_update_current_time(bp);
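/* The event carries only the upper bits of the PHC time; splice them
 * above the current lower BNXT_PHC_BITS (48) bits to rebuild the full
 * timestamp before reinitializing the timecounter.
 */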
2912 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2913 BNXT_PHC_BITS) | ptp->current_time);
2914 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2915 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2916 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2917 }
2918 break;
2919 }
2920 goto async_event_process_exit;
2921 }
2922 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2923 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2924
2925 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2926 goto async_event_process_exit;
2927 }
2928 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2929 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2930 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2931
2932 if (type >= ARRAY_SIZE(bp->bs_trace))
2933 goto async_event_process_exit;
2934 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2935 goto async_event_process_exit;
2936 }
2937 default:
2938 goto async_event_process_exit;
2939 }
2940 __bnxt_queue_sp_work(bp);
2941 async_event_process_exit:
2942 bnxt_ulp_async_events(bp, cmpl);
2943 return 0;
2944 }
2945
2946 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2947 {
2948 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2949 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2950 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2951 (struct hwrm_fwd_req_cmpl *)txcmp;
2952
2953 switch (cmpl_type) {
2954 case CMPL_BASE_TYPE_HWRM_DONE:
2955 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2956 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2957 break;
2958
2959 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2960 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2961
2962 if ((vf_id < bp->pf.first_vf_id) ||
2963 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2964 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2965 vf_id);
2966 return -EINVAL;
2967 }
2968
2969 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2970 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2971 break;
2972
2973 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2974 bnxt_async_event_process(bp,
2975 (struct hwrm_async_event_cmpl *)txcmp);
2976 break;
2977
2978 default:
2979 break;
2980 }
2981
2982 return 0;
2983 }
2984
2985 static bool bnxt_vnic_is_active(struct bnxt *bp)
2986 {
2987 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2988
2989 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2990 }
2991
2992 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2993 {
2994 struct bnxt_napi *bnapi = dev_instance;
2995 struct bnxt *bp = bnapi->bp;
2996 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2997 u32 cons = RING_CMP(cpr->cp_raw_cons);
2998
2999 cpr->event_ctr++;
3000 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
3001 napi_schedule(&bnapi->napi);
3002 return IRQ_HANDLED;
3003 }
3004
3005 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3006 {
3007 u32 raw_cons = cpr->cp_raw_cons;
3008 u16 cons = RING_CMP(raw_cons);
3009 struct tx_cmp *txcmp;
3010
3011 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3012
3013 return TX_CMP_VALID(txcmp, raw_cons);
3014 }
3015
3016 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3017 int budget)
3018 {
3019 struct bnxt_napi *bnapi = cpr->bnapi;
3020 u32 raw_cons = cpr->cp_raw_cons;
3021 bool flush_xdp = false;
3022 u32 cons;
3023 int rx_pkts = 0;
3024 u8 event = 0;
3025 struct tx_cmp *txcmp;
3026
3027 cpr->has_more_work = 0;
3028 cpr->had_work_done = 1;
3029 while (1) {
3030 u8 cmp_type;
3031 int rc;
3032
3033 cons = RING_CMP(raw_cons);
3034 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3035
3036 if (!TX_CMP_VALID(txcmp, raw_cons))
3037 break;
3038
3039 /* The valid test of the entry must be done first before
3040 * reading any further.
3041 */
3042 dma_rmb();
3043 cmp_type = TX_CMP_TYPE(txcmp);
3044 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3045 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3046 u32 opaque = txcmp->tx_cmp_opaque;
3047 struct bnxt_tx_ring_info *txr;
3048 u16 tx_freed;
3049
3050 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3051 event |= BNXT_TX_CMP_EVENT;
3052 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3053 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3054 else
3055 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3056 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3057 bp->tx_ring_mask;
3058 /* return full budget so NAPI will complete. */
3059 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3060 rx_pkts = budget;
3061 raw_cons = NEXT_RAW_CMP(raw_cons);
3062 if (budget)
3063 cpr->has_more_work = 1;
3064 break;
3065 }
3066 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3067 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3068 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3069 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3070 if (likely(budget))
3071 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3072 else
3073 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3074 &event);
3075 if (event & BNXT_REDIRECT_EVENT)
3076 flush_xdp = true;
3077 if (likely(rc >= 0))
3078 rx_pkts += rc;
3079 /* Increment rx_pkts when rc is -ENOMEM to count towards
3080 * the NAPI budget. Otherwise, we may potentially loop
3081 * here forever if we consistently cannot allocate
3082 * buffers.
3083 */
3084 else if (rc == -ENOMEM && budget)
3085 rx_pkts++;
3086 else if (rc == -EBUSY) /* partial completion */
3087 break;
3088 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3089 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3090 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3091 bnxt_hwrm_handler(bp, txcmp);
3092 }
3093 raw_cons = NEXT_RAW_CMP(raw_cons);
3094
3095 if (rx_pkts && rx_pkts == budget) {
3096 cpr->has_more_work = 1;
3097 break;
3098 }
3099 }
3100
3101 if (flush_xdp) {
3102 xdp_do_flush();
3103 event &= ~BNXT_REDIRECT_EVENT;
3104 }
3105
3106 if (event & BNXT_TX_EVENT) {
3107 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3108 u16 prod = txr->tx_prod;
3109
3110 /* Sync BD data before updating doorbell */
3111 wmb();
3112
3113 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3114 event &= ~BNXT_TX_EVENT;
3115 }
3116
3117 cpr->cp_raw_cons = raw_cons;
3118 bnapi->events |= event;
3119 return rx_pkts;
3120 }
3121
3122 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3123 int budget)
3124 {
3125 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3126 bnapi->tx_int(bp, bnapi, budget);
3127
3128 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3129 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3130
3131 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3132 bnapi->events &= ~BNXT_RX_EVENT;
3133 }
3134 if (bnapi->events & BNXT_AGG_EVENT) {
3135 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3136
3137 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3138 bnapi->events &= ~BNXT_AGG_EVENT;
3139 }
3140 }
3141
3142 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3143 int budget)
3144 {
3145 struct bnxt_napi *bnapi = cpr->bnapi;
3146 int rx_pkts;
3147
3148 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3149
3150 /* ACK completion ring before freeing tx ring and producing new
3151 * buffers in rx/agg rings to prevent overflowing the completion
3152 * ring.
3153 */
3154 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3155
3156 __bnxt_poll_work_done(bp, bnapi, budget);
3157 return rx_pkts;
3158 }
3159
3160 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3161 {
3162 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3163 struct bnxt *bp = bnapi->bp;
3164 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3165 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3166 struct tx_cmp *txcmp;
3167 struct rx_cmp_ext *rxcmp1;
3168 u32 cp_cons, tmp_raw_cons;
3169 u32 raw_cons = cpr->cp_raw_cons;
3170 bool flush_xdp = false;
3171 u32 rx_pkts = 0;
3172 u8 event = 0;
3173
3174 while (1) {
3175 int rc;
3176
3177 cp_cons = RING_CMP(raw_cons);
3178 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3179
3180 if (!TX_CMP_VALID(txcmp, raw_cons))
3181 break;
3182
3183 /* The valid test of the entry must be done first before
3184 * reading any further.
3185 */
3186 dma_rmb();
3187 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3188 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3189 cp_cons = RING_CMP(tmp_raw_cons);
3190 rxcmp1 = (struct rx_cmp_ext *)
3191 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3192
3193 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3194 break;
3195
3196 /* force an error to recycle the buffer */
3197 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3198 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3199
3200 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3201 if (likely(rc == -EIO) && budget)
3202 rx_pkts++;
3203 else if (rc == -EBUSY) /* partial completion */
3204 break;
3205 if (event & BNXT_REDIRECT_EVENT)
3206 flush_xdp = true;
3207 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3208 CMPL_BASE_TYPE_HWRM_DONE)) {
3209 bnxt_hwrm_handler(bp, txcmp);
3210 } else {
3211 netdev_err(bp->dev,
3212 "Invalid completion received on special ring\n");
3213 }
3214 raw_cons = NEXT_RAW_CMP(raw_cons);
3215
3216 if (rx_pkts == budget)
3217 break;
3218 }
3219
3220 cpr->cp_raw_cons = raw_cons;
3221 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3222 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3223
3224 if (event & BNXT_AGG_EVENT)
3225 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3226 if (flush_xdp)
3227 xdp_do_flush();
3228
3229 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3230 napi_complete_done(napi, rx_pkts);
3231 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3232 }
3233 return rx_pkts;
3234 }
3235
3236 static int bnxt_poll(struct napi_struct *napi, int budget)
3237 {
3238 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3239 struct bnxt *bp = bnapi->bp;
3240 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3241 int work_done = 0;
3242
3243 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3244 napi_complete(napi);
3245 return 0;
3246 }
3247 while (1) {
3248 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3249
3250 if (work_done >= budget) {
3251 if (!budget)
3252 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3253 break;
3254 }
3255
3256 if (!bnxt_has_work(bp, cpr)) {
3257 if (napi_complete_done(napi, work_done))
3258 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3259 break;
3260 }
3261 }
3262 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3263 struct dim_sample dim_sample = {};
3264
3265 dim_update_sample(cpr->event_ctr,
3266 cpr->rx_packets,
3267 cpr->rx_bytes,
3268 &dim_sample);
3269 net_dim(&cpr->dim, &dim_sample);
3270 }
3271 return work_done;
3272 }
3273
3274 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3275 {
3276 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3277 int i, work_done = 0;
3278
3279 for (i = 0; i < cpr->cp_ring_count; i++) {
3280 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3281
3282 if (cpr2->had_nqe_notify) {
3283 work_done += __bnxt_poll_work(bp, cpr2,
3284 budget - work_done);
3285 cpr->has_more_work |= cpr2->has_more_work;
3286 }
3287 }
3288 return work_done;
3289 }
3290
3291 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3292 u64 dbr_type, int budget)
3293 {
3294 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3295 int i;
3296
3297 for (i = 0; i < cpr->cp_ring_count; i++) {
3298 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3299 struct bnxt_db_info *db;
3300
3301 if (cpr2->had_work_done) {
3302 u32 tgl = 0;
3303
3304 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3305 cpr2->had_nqe_notify = 0;
3306 tgl = cpr2->toggle;
3307 }
3308 db = &cpr2->cp_db;
3309 bnxt_writeq(bp,
3310 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3311 DB_RING_IDX(db, cpr2->cp_raw_cons),
3312 db->doorbell);
3313 cpr2->had_work_done = 0;
3314 }
3315 }
3316 __bnxt_poll_work_done(bp, bnapi, budget);
3317 }
3318
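/* NAPI poll for P5+ chips: the vector's ring is a notification queue
 * (NQ); each NQ entry points at one of the per-ring completion queues
 * in cpr->cp_ring_arr, which is then drained with __bnxt_poll_work().
 */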
3319 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3320 {
3321 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3322 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3323 struct bnxt_cp_ring_info *cpr_rx;
3324 u32 raw_cons = cpr->cp_raw_cons;
3325 struct bnxt *bp = bnapi->bp;
3326 struct nqe_cn *nqcmp;
3327 int work_done = 0;
3328 u32 cons;
3329
3330 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3331 napi_complete(napi);
3332 return 0;
3333 }
3334 if (cpr->has_more_work) {
3335 cpr->has_more_work = 0;
3336 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3337 }
3338 while (1) {
3339 u16 type;
3340
3341 cons = RING_CMP(raw_cons);
3342 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3343
3344 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3345 if (cpr->has_more_work)
3346 break;
3347
3348 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3349 budget);
3350 cpr->cp_raw_cons = raw_cons;
3351 if (napi_complete_done(napi, work_done))
3352 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3353 cpr->cp_raw_cons);
3354 goto poll_done;
3355 }
3356
3357 /* The valid test of the entry must be done first, before
3358 * reading any further.
3359 */
3360 dma_rmb();
3361
3362 type = le16_to_cpu(nqcmp->type);
3363 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3364 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3365 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3366 struct bnxt_cp_ring_info *cpr2;
3367
3368 /* No more budget for RX work */
3369 if (budget && work_done >= budget &&
3370 cq_type == BNXT_NQ_HDL_TYPE_RX)
3371 break;
3372
3373 idx = BNXT_NQ_HDL_IDX(idx);
3374 cpr2 = &cpr->cp_ring_arr[idx];
3375 cpr2->had_nqe_notify = 1;
3376 cpr2->toggle = NQE_CN_TOGGLE(type);
3377 work_done += __bnxt_poll_work(bp, cpr2,
3378 budget - work_done);
3379 cpr->has_more_work |= cpr2->has_more_work;
3380 } else {
3381 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3382 }
3383 raw_cons = NEXT_RAW_CMP(raw_cons);
3384 }
3385 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3386 if (raw_cons != cpr->cp_raw_cons) {
3387 cpr->cp_raw_cons = raw_cons;
3388 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3389 }
3390 poll_done:
3391 cpr_rx = &cpr->cp_ring_arr[0];
3392 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3393 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3394 struct dim_sample dim_sample = {};
3395
3396 dim_update_sample(cpr->event_ctr,
3397 cpr_rx->rx_packets,
3398 cpr_rx->rx_bytes,
3399 &dim_sample);
3400 net_dim(&cpr->dim, &dim_sample);
3401 }
3402 return work_done;
3403 }
3404
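/* Walk one TX ring and release every pending buffer.  Three cases are
 * handled below: XDP_REDIRECT slots hold an xdp_frame with a single
 * mapping; "push" packets were copied into the pre-allocated push
 * buffer, so they occupy two BD slots but need no unmapping; regular
 * skbs use one head mapping plus one BD per fragment, which is why the
 * index advances by 2 + nr_frags for them.
 */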
3405 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3406 struct bnxt_tx_ring_info *txr, int idx)
3407 {
3408 int i, max_idx;
3409 struct pci_dev *pdev = bp->pdev;
3410
3411 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3412
3413 for (i = 0; i < max_idx;) {
3414 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3415 struct sk_buff *skb;
3416 int j, last;
3417
3418 if (idx < bp->tx_nr_rings_xdp &&
3419 tx_buf->action == XDP_REDIRECT) {
3420 dma_unmap_single(&pdev->dev,
3421 dma_unmap_addr(tx_buf, mapping),
3422 dma_unmap_len(tx_buf, len),
3423 DMA_TO_DEVICE);
3424 xdp_return_frame(tx_buf->xdpf);
3425 tx_buf->action = 0;
3426 tx_buf->xdpf = NULL;
3427 i++;
3428 continue;
3429 }
3430
3431 skb = tx_buf->skb;
3432 if (!skb) {
3433 i++;
3434 continue;
3435 }
3436
3437 tx_buf->skb = NULL;
3438
3439 if (tx_buf->is_push) {
3440 dev_kfree_skb(skb);
3441 i += 2;
3442 continue;
3443 }
3444
3445 dma_unmap_single(&pdev->dev,
3446 dma_unmap_addr(tx_buf, mapping),
3447 skb_headlen(skb),
3448 DMA_TO_DEVICE);
3449
3450 last = tx_buf->nr_frags;
3451 i += 2;
3452 for (j = 0; j < last; j++, i++) {
3453 int ring_idx = i & bp->tx_ring_mask;
3454 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3455
3456 tx_buf = &txr->tx_buf_ring[ring_idx];
3457 netmem_dma_unmap_page_attrs(&pdev->dev,
3458 dma_unmap_addr(tx_buf,
3459 mapping),
3460 skb_frag_size(frag),
3461 DMA_TO_DEVICE, 0);
3462 }
3463 dev_kfree_skb(skb);
3464 }
3465 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3466 }
3467
3468 static void bnxt_free_tx_skbs(struct bnxt *bp)
3469 {
3470 int i;
3471
3472 if (!bp->tx_ring)
3473 return;
3474
3475 for (i = 0; i < bp->tx_nr_rings; i++) {
3476 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3477
3478 if (!txr->tx_buf_ring)
3479 continue;
3480
3481 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3482 }
3483
3484 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3485 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3486 }
3487
3488 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3489 {
3490 int i, max_idx;
3491
3492 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3493
3494 for (i = 0; i < max_idx; i++) {
3495 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3496 void *data = rx_buf->data;
3497
3498 if (!data)
3499 continue;
3500
3501 rx_buf->data = NULL;
3502 if (BNXT_RX_PAGE_MODE(bp))
3503 page_pool_recycle_direct(rxr->page_pool, data);
3504 else
3505 page_pool_free_va(rxr->head_pool, data, true);
3506 }
3507 }
3508
3509 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3510 {
3511 int i, max_idx;
3512
3513 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3514
3515 for (i = 0; i < max_idx; i++) {
3516 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3517 netmem_ref netmem = rx_agg_buf->netmem;
3518
3519 if (!netmem)
3520 continue;
3521
3522 rx_agg_buf->netmem = 0;
3523 __clear_bit(i, rxr->rx_agg_bmap);
3524
3525 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3526 }
3527 }
3528
3529 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3530 struct bnxt_rx_ring_info *rxr)
3531 {
3532 int i;
3533
3534 for (i = 0; i < bp->max_tpa; i++) {
3535 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3536 u8 *data = tpa_info->data;
3537
3538 if (!data)
3539 continue;
3540
3541 tpa_info->data = NULL;
3542 page_pool_free_va(rxr->head_pool, data, false);
3543 }
3544 }
3545
3546 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3547 struct bnxt_rx_ring_info *rxr)
3548 {
3549 struct bnxt_tpa_idx_map *map;
3550
3551 if (!rxr->rx_tpa)
3552 goto skip_rx_tpa_free;
3553
3554 bnxt_free_one_tpa_info_data(bp, rxr);
3555
3556 skip_rx_tpa_free:
3557 if (!rxr->rx_buf_ring)
3558 goto skip_rx_buf_free;
3559
3560 bnxt_free_one_rx_ring(bp, rxr);
3561
3562 skip_rx_buf_free:
3563 if (!rxr->rx_agg_ring)
3564 goto skip_rx_agg_free;
3565
3566 bnxt_free_one_rx_agg_ring(bp, rxr);
3567
3568 skip_rx_agg_free:
3569 map = rxr->rx_tpa_idx_map;
3570 if (map)
3571 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3572 }
3573
3574 static void bnxt_free_rx_skbs(struct bnxt *bp)
3575 {
3576 int i;
3577
3578 if (!bp->rx_ring)
3579 return;
3580
3581 for (i = 0; i < bp->rx_nr_rings; i++)
3582 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3583 }
3584
3585 static void bnxt_free_skbs(struct bnxt *bp)
3586 {
3587 bnxt_free_tx_skbs(bp);
3588 bnxt_free_rx_skbs(bp);
3589 }
3590
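/* Pre-pattern newly allocated context memory for the chip.  If the FW
 * did not report a per-entry init offset, the whole block is memset to
 * the init value; otherwise one byte per entry is written.  E.g. with
 * entry_size = 64 and init_offset = 8 (illustrative values only), the
 * loop touches bytes 8, 72, 136, ... of the block.
 */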
3591 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3592 {
3593 u8 init_val = ctxm->init_value;
3594 u16 offset = ctxm->init_offset;
3595 u8 *p2 = p;
3596 int i;
3597
3598 if (!init_val)
3599 return;
3600 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3601 memset(p, init_val, len);
3602 return;
3603 }
3604 for (i = 0; i < len; i += ctxm->entry_size)
3605 *(p2 + i + offset) = init_val;
3606 }
3607
3608 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3609 void *buf, size_t offset, size_t head,
3610 size_t tail)
3611 {
3612 int i, head_page, start_idx, source_offset;
3613 size_t len, rem_len, total_len, max_bytes;
3614
3615 head_page = head / rmem->page_size;
3616 source_offset = head % rmem->page_size;
3617 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3618 if (!total_len)
3619 total_len = MAX_CTX_BYTES;
3620 start_idx = head_page % MAX_CTX_PAGES;
3621 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3622 source_offset;
3623 total_len = min(total_len, max_bytes);
3624 rem_len = total_len;
3625
3626 for (i = start_idx; rem_len; i++, source_offset = 0) {
3627 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3628 if (buf)
3629 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3630 len);
3631 offset += len;
3632 rem_len -= len;
3633 }
3634 return total_len;
3635 }
3636
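/* bnxt_free_ring() and bnxt_alloc_ring() manage the generic ring
 * memory descriptor (struct bnxt_ring_mem_info): an array of
 * DMA-coherent pages (pg_arr/dma_arr), an optional page table (pg_tbl)
 * holding 8-byte PTEs when the ring spans multiple pages or is
 * multi-level (depth > 0), and an optional vmalloc'ed shadow array
 * (vmem) for per-descriptor software state.
 */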
3637 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3638 {
3639 struct pci_dev *pdev = bp->pdev;
3640 int i;
3641
3642 if (!rmem->pg_arr)
3643 goto skip_pages;
3644
3645 for (i = 0; i < rmem->nr_pages; i++) {
3646 if (!rmem->pg_arr[i])
3647 continue;
3648
3649 dma_free_coherent(&pdev->dev, rmem->page_size,
3650 rmem->pg_arr[i], rmem->dma_arr[i]);
3651
3652 rmem->pg_arr[i] = NULL;
3653 }
3654 skip_pages:
3655 if (rmem->pg_tbl) {
3656 size_t pg_tbl_size = rmem->nr_pages * 8;
3657
3658 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3659 pg_tbl_size = rmem->page_size;
3660 dma_free_coherent(&pdev->dev, pg_tbl_size,
3661 rmem->pg_tbl, rmem->pg_tbl_map);
3662 rmem->pg_tbl = NULL;
3663 }
3664 if (rmem->vmem_size && *rmem->vmem) {
3665 vfree(*rmem->vmem);
3666 *rmem->vmem = NULL;
3667 }
3668 }
3669
3670 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3671 {
3672 struct pci_dev *pdev = bp->pdev;
3673 u64 valid_bit = 0;
3674 int i;
3675
3676 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3677 valid_bit = PTU_PTE_VALID;
3678 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3679 size_t pg_tbl_size = rmem->nr_pages * 8;
3680
3681 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3682 pg_tbl_size = rmem->page_size;
3683 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3684 &rmem->pg_tbl_map,
3685 GFP_KERNEL);
3686 if (!rmem->pg_tbl)
3687 return -ENOMEM;
3688 }
3689
3690 for (i = 0; i < rmem->nr_pages; i++) {
3691 u64 extra_bits = valid_bit;
3692
3693 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3694 rmem->page_size,
3695 &rmem->dma_arr[i],
3696 GFP_KERNEL);
3697 if (!rmem->pg_arr[i])
3698 return -ENOMEM;
3699
3700 if (rmem->ctx_mem)
3701 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3702 rmem->page_size);
3703 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3704 if (i == rmem->nr_pages - 2 &&
3705 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3706 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3707 else if (i == rmem->nr_pages - 1 &&
3708 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3709 extra_bits |= PTU_PTE_LAST;
3710 rmem->pg_tbl[i] =
3711 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3712 }
3713 }
3714
3715 if (rmem->vmem_size) {
3716 *rmem->vmem = vzalloc(rmem->vmem_size);
3717 if (!(*rmem->vmem))
3718 return -ENOMEM;
3719 }
3720 return 0;
3721 }
3722
3723 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3724 struct bnxt_rx_ring_info *rxr)
3725 {
3726 int i;
3727
3728 kfree(rxr->rx_tpa_idx_map);
3729 rxr->rx_tpa_idx_map = NULL;
3730 if (rxr->rx_tpa) {
3731 for (i = 0; i < bp->max_tpa; i++) {
3732 kfree(rxr->rx_tpa[i].agg_arr);
3733 rxr->rx_tpa[i].agg_arr = NULL;
3734 }
3735 }
3736 kfree(rxr->rx_tpa);
3737 rxr->rx_tpa = NULL;
3738 }
3739
3740 static void bnxt_free_tpa_info(struct bnxt *bp)
3741 {
3742 int i;
3743
3744 for (i = 0; i < bp->rx_nr_rings; i++) {
3745 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3746
3747 bnxt_free_one_tpa_info(bp, rxr);
3748 }
3749 }
3750
3751 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3752 struct bnxt_rx_ring_info *rxr)
3753 {
3754 struct rx_agg_cmp *agg;
3755 int i;
3756
3757 rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3758 if (!rxr->rx_tpa)
3759 return -ENOMEM;
3760
3761 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3762 return 0;
3763 for (i = 0; i < bp->max_tpa; i++) {
3764 agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3765 if (!agg)
3766 return -ENOMEM;
3767 rxr->rx_tpa[i].agg_arr = agg;
3768 }
3769 rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3770 if (!rxr->rx_tpa_idx_map)
3771 return -ENOMEM;
3772
3773 return 0;
3774 }
3775
3776 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3777 {
3778 int i, rc;
3779
3780 bp->max_tpa = MAX_TPA;
3781 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3782 if (!bp->max_tpa_v2)
3783 return 0;
3784 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3785 }
3786
3787 for (i = 0; i < bp->rx_nr_rings; i++) {
3788 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3789
3790 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3791 if (rc)
3792 return rc;
3793 }
3794 return 0;
3795 }
3796
3797 static void bnxt_free_rx_rings(struct bnxt *bp)
3798 {
3799 int i;
3800
3801 if (!bp->rx_ring)
3802 return;
3803
3804 bnxt_free_tpa_info(bp);
3805 for (i = 0; i < bp->rx_nr_rings; i++) {
3806 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3807 struct bnxt_ring_struct *ring;
3808
3809 if (rxr->xdp_prog)
3810 bpf_prog_put(rxr->xdp_prog);
3811
3812 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3813 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3814
3815 page_pool_destroy(rxr->page_pool);
3816 page_pool_destroy(rxr->head_pool);
3817 rxr->page_pool = rxr->head_pool = NULL;
3818
3819 kfree(rxr->rx_agg_bmap);
3820 rxr->rx_agg_bmap = NULL;
3821
3822 ring = &rxr->rx_ring_struct;
3823 bnxt_free_ring(bp, &ring->ring_mem);
3824
3825 ring = &rxr->rx_agg_ring_struct;
3826 bnxt_free_ring(bp, &ring->ring_mem);
3827 }
3828 }
3829
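/* Worked example (illustrative numbers): with rx_agg_ring_size = 4096
 * and rx_page_size twice BNXT_RX_PAGE_SIZE, the fill level below is
 * halved to 2048 entries so the ring consumes the same number of bytes
 * as one using the default page size.
 */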
3830 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3831 struct bnxt_rx_ring_info *rxr)
3832 {
3833 /* The user may have chosen a larger than default rx_page_size.
3834 * Keep the ring sizes uniform, and consume a uniform number of
3835 * bytes per ring, by capping how much of each ring we fill.
3836 */
3837 int fill_level = bp->rx_agg_ring_size;
3838
3839 if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3840 fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3841
3842 return fill_level;
3843 }
3844
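/* Each RX ring uses up to two page pools: the main pool feeds the data
 * and aggregation buffers (possibly high-order pages or unreadable
 * netmem), while a separate 0-order head pool backs packet headers
 * whenever the main pool's pages cannot be used for them
 * (page_pool_is_unreadable() or order > 0).  Otherwise head_pool is
 * simply a second reference on the same pool.
 */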
3845 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3846 struct bnxt_rx_ring_info *rxr,
3847 int numa_node)
3848 {
3849 unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3850 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3851 struct page_pool_params pp = { 0 };
3852 struct page_pool *pool;
3853
3854 pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3855 if (BNXT_RX_PAGE_MODE(bp))
3856 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3857
3858 pp.order = get_order(rxr->rx_page_size);
3859 pp.nid = numa_node;
3860 pp.netdev = bp->dev;
3861 pp.dev = &bp->pdev->dev;
3862 pp.dma_dir = bp->rx_dir;
3863 pp.max_len = PAGE_SIZE << pp.order;
3864 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3865 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3866 pp.queue_idx = rxr->bnapi->index;
3867
3868 pool = page_pool_create(&pp);
3869 if (IS_ERR(pool))
3870 return PTR_ERR(pool);
3871 rxr->page_pool = pool;
3872
3873 rxr->need_head_pool = page_pool_is_unreadable(pool);
3874 rxr->need_head_pool |= !!pp.order;
3875 if (bnxt_separate_head_pool(rxr)) {
3876 pp.order = 0;
3877 pp.max_len = PAGE_SIZE;
3878 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3879 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3880 pool = page_pool_create(&pp);
3881 if (IS_ERR(pool))
3882 goto err_destroy_pp;
3883 } else {
3884 page_pool_get(pool);
3885 }
3886 rxr->head_pool = pool;
3887
3888 return 0;
3889
3890 err_destroy_pp:
3891 page_pool_destroy(rxr->page_pool);
3892 rxr->page_pool = NULL;
3893 return PTR_ERR(pool);
3894 }
3895
3896 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3897 {
3898 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3899 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3900 }
3901
3902 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3903 {
3904 u16 mem_size;
3905
3906 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3907 mem_size = rxr->rx_agg_bmap_size / 8;
3908 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3909 if (!rxr->rx_agg_bmap)
3910 return -ENOMEM;
3911
3912 return 0;
3913 }
3914
3915 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3916 {
3917 int numa_node = dev_to_node(&bp->pdev->dev);
3918 int i, rc = 0, agg_rings = 0, cpu;
3919
3920 if (!bp->rx_ring)
3921 return -ENOMEM;
3922
3923 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3924 agg_rings = 1;
3925
3926 for (i = 0; i < bp->rx_nr_rings; i++) {
3927 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3928 struct bnxt_ring_struct *ring;
3929 int cpu_node;
3930
3931 ring = &rxr->rx_ring_struct;
3932
3933 cpu = cpumask_local_spread(i, numa_node);
3934 cpu_node = cpu_to_node(cpu);
3935 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3936 i, cpu_node);
3937 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3938 if (rc)
3939 return rc;
3940 bnxt_enable_rx_page_pool(rxr);
3941
3942 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3943 if (rc < 0)
3944 return rc;
3945
3946 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3947 MEM_TYPE_PAGE_POOL,
3948 rxr->page_pool);
3949 if (rc) {
3950 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3951 return rc;
3952 }
3953
3954 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3955 if (rc)
3956 return rc;
3957
3958 ring->grp_idx = i;
3959 if (agg_rings) {
3960 ring = &rxr->rx_agg_ring_struct;
3961 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3962 if (rc)
3963 return rc;
3964
3965 ring->grp_idx = i;
3966 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3967 if (rc)
3968 return rc;
3969 }
3970 }
3971 if (bp->flags & BNXT_FLAG_TPA)
3972 rc = bnxt_alloc_tpa_info(bp);
3973 return rc;
3974 }
3975
3976 static void bnxt_free_tx_rings(struct bnxt *bp)
3977 {
3978 int i;
3979 struct pci_dev *pdev = bp->pdev;
3980
3981 if (!bp->tx_ring)
3982 return;
3983
3984 for (i = 0; i < bp->tx_nr_rings; i++) {
3985 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3986 struct bnxt_ring_struct *ring;
3987
3988 if (txr->tx_push) {
3989 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3990 txr->tx_push, txr->tx_push_mapping);
3991 txr->tx_push = NULL;
3992 }
3993
3994 ring = &txr->tx_ring_struct;
3995
3996 bnxt_free_ring(bp, &ring->ring_mem);
3997 }
3998 }
3999
4000 #define BNXT_TC_TO_RING_BASE(bp, tc) \
4001 ((tc) * (bp)->tx_nr_rings_per_tc)
4002
4003 #define BNXT_RING_TO_TC_OFF(bp, tx) \
4004 ((tx) % (bp)->tx_nr_rings_per_tc)
4005
4006 #define BNXT_RING_TO_TC(bp, tx) \
4007 ((tx) / (bp)->tx_nr_rings_per_tc)
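/* Worked example (illustrative): with tx_nr_rings_per_tc = 4, TX ring
 * 5 belongs to TC BNXT_RING_TO_TC(bp, 5) = 1 at offset
 * BNXT_RING_TO_TC_OFF(bp, 5) = 1, and TC 1 starts at ring
 * BNXT_TC_TO_RING_BASE(bp, 1) = 4.
 */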
4008
4009 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4010 {
4011 int i, j, rc;
4012 struct pci_dev *pdev = bp->pdev;
4013
4014 bp->tx_push_size = 0;
4015 if (bp->tx_push_thresh) {
4016 int push_size;
4017
4018 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4019 bp->tx_push_thresh);
4020
4021 if (push_size > 256) {
4022 push_size = 0;
4023 bp->tx_push_thresh = 0;
4024 }
4025
4026 bp->tx_push_size = push_size;
4027 }
4028
4029 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4030 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4031 struct bnxt_ring_struct *ring;
4032 u8 qidx;
4033
4034 ring = &txr->tx_ring_struct;
4035
4036 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4037 if (rc)
4038 return rc;
4039
4040 ring->grp_idx = txr->bnapi->index;
4041 if (bp->tx_push_size) {
4042 dma_addr_t mapping;
4043
4044 /* One pre-allocated DMA buffer to back up the
4045 * TX push operation.
4046 */
4047 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4048 bp->tx_push_size,
4049 &txr->tx_push_mapping,
4050 GFP_KERNEL);
4051
4052 if (!txr->tx_push)
4053 return -ENOMEM;
4054
4055 mapping = txr->tx_push_mapping +
4056 sizeof(struct tx_push_bd);
4057 txr->data_mapping = cpu_to_le64(mapping);
4058 }
4059 qidx = bp->tc_to_qidx[j];
4060 ring->queue_id = bp->q_info[qidx].queue_id;
4061 spin_lock_init(&txr->xdp_tx_lock);
4062 if (i < bp->tx_nr_rings_xdp)
4063 continue;
4064 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4065 j++;
4066 }
4067 return 0;
4068 }
4069
4070 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4071 {
4072 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4073
4074 kfree(cpr->cp_desc_ring);
4075 cpr->cp_desc_ring = NULL;
4076 ring->ring_mem.pg_arr = NULL;
4077 kfree(cpr->cp_desc_mapping);
4078 cpr->cp_desc_mapping = NULL;
4079 ring->ring_mem.dma_arr = NULL;
4080 }
4081
4082 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4083 {
4084 cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4085 if (!cpr->cp_desc_ring)
4086 return -ENOMEM;
4087 cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4088 if (!cpr->cp_desc_mapping)
4089 return -ENOMEM;
4090 return 0;
4091 }
4092
4093 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4094 {
4095 int i;
4096
4097 if (!bp->bnapi)
4098 return;
4099 for (i = 0; i < bp->cp_nr_rings; i++) {
4100 struct bnxt_napi *bnapi = bp->bnapi[i];
4101
4102 if (!bnapi)
4103 continue;
4104 bnxt_free_cp_arrays(&bnapi->cp_ring);
4105 }
4106 }
4107
4108 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4109 {
4110 int i, n = bp->cp_nr_pages;
4111
4112 for (i = 0; i < bp->cp_nr_rings; i++) {
4113 struct bnxt_napi *bnapi = bp->bnapi[i];
4114 int rc;
4115
4116 if (!bnapi)
4117 continue;
4118 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4119 if (rc)
4120 return rc;
4121 }
4122 return 0;
4123 }
4124
4125 static void bnxt_free_cp_rings(struct bnxt *bp)
4126 {
4127 int i;
4128
4129 if (!bp->bnapi)
4130 return;
4131
4132 for (i = 0; i < bp->cp_nr_rings; i++) {
4133 struct bnxt_napi *bnapi = bp->bnapi[i];
4134 struct bnxt_cp_ring_info *cpr;
4135 struct bnxt_ring_struct *ring;
4136 int j;
4137
4138 if (!bnapi)
4139 continue;
4140
4141 cpr = &bnapi->cp_ring;
4142 ring = &cpr->cp_ring_struct;
4143
4144 bnxt_free_ring(bp, &ring->ring_mem);
4145
4146 if (!cpr->cp_ring_arr)
4147 continue;
4148
4149 for (j = 0; j < cpr->cp_ring_count; j++) {
4150 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4151
4152 ring = &cpr2->cp_ring_struct;
4153 bnxt_free_ring(bp, &ring->ring_mem);
4154 bnxt_free_cp_arrays(cpr2);
4155 }
4156 kfree(cpr->cp_ring_arr);
4157 cpr->cp_ring_arr = NULL;
4158 cpr->cp_ring_count = 0;
4159 }
4160 }
4161
4162 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4163 struct bnxt_cp_ring_info *cpr)
4164 {
4165 struct bnxt_ring_mem_info *rmem;
4166 struct bnxt_ring_struct *ring;
4167 int rc;
4168
4169 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4170 if (rc) {
4171 bnxt_free_cp_arrays(cpr);
4172 return -ENOMEM;
4173 }
4174 ring = &cpr->cp_ring_struct;
4175 rmem = &ring->ring_mem;
4176 rmem->nr_pages = bp->cp_nr_pages;
4177 rmem->page_size = HW_CMPD_RING_SIZE;
4178 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4179 rmem->dma_arr = cpr->cp_desc_mapping;
4180 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4181 rc = bnxt_alloc_ring(bp, rmem);
4182 if (rc) {
4183 bnxt_free_ring(bp, rmem);
4184 bnxt_free_cp_arrays(cpr);
4185 }
4186 return rc;
4187 }
4188
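/* On P5+ chips each NAPI owns one NQ plus an array of sub completion
 * rings sized from the ring composition: +1 CQ if this index carries
 * an RX ring, +1 CQ for an XDP TX ring, or +num_tc CQs for regular TX
 * rings.  E.g. a shared RX/TX NAPI with two TCs needs cp_count = 3
 * sub-rings (illustrative).
 */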
4189 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4190 {
4191 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4192 int i, j, rc, ulp_msix;
4193 int tcs = bp->num_tc;
4194
4195 if (!tcs)
4196 tcs = 1;
4197 ulp_msix = bnxt_get_ulp_msix_num(bp);
4198 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4199 struct bnxt_napi *bnapi = bp->bnapi[i];
4200 struct bnxt_cp_ring_info *cpr, *cpr2;
4201 struct bnxt_ring_struct *ring;
4202 int cp_count = 0, k;
4203 int rx = 0, tx = 0;
4204
4205 if (!bnapi)
4206 continue;
4207
4208 cpr = &bnapi->cp_ring;
4209 cpr->bnapi = bnapi;
4210 ring = &cpr->cp_ring_struct;
4211
4212 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4213 if (rc)
4214 return rc;
4215
4216 ring->map_idx = ulp_msix + i;
4217
4218 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4219 continue;
4220
4221 if (i < bp->rx_nr_rings) {
4222 cp_count++;
4223 rx = 1;
4224 }
4225 if (i < bp->tx_nr_rings_xdp) {
4226 cp_count++;
4227 tx = 1;
4228 } else if ((sh && i < bp->tx_nr_rings) ||
4229 (!sh && i >= bp->rx_nr_rings)) {
4230 cp_count += tcs;
4231 tx = 1;
4232 }
4233
4234 cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4235 if (!cpr->cp_ring_arr)
4236 return -ENOMEM;
4237 cpr->cp_ring_count = cp_count;
4238
4239 for (k = 0; k < cp_count; k++) {
4240 cpr2 = &cpr->cp_ring_arr[k];
4241 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4242 if (rc)
4243 return rc;
4244 cpr2->bnapi = bnapi;
4245 cpr2->sw_stats = cpr->sw_stats;
4246 cpr2->cp_idx = k;
4247 if (!k && rx) {
4248 bp->rx_ring[i].rx_cpr = cpr2;
4249 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4250 } else {
4251 int n, tc = k - rx;
4252
4253 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4254 bp->tx_ring[n].tx_cpr = cpr2;
4255 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4256 }
4257 }
4258 if (tx)
4259 j++;
4260 }
4261 return 0;
4262 }
4263
4264 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4265 struct bnxt_rx_ring_info *rxr)
4266 {
4267 struct bnxt_ring_mem_info *rmem;
4268 struct bnxt_ring_struct *ring;
4269
4270 ring = &rxr->rx_ring_struct;
4271 rmem = &ring->ring_mem;
4272 rmem->nr_pages = bp->rx_nr_pages;
4273 rmem->page_size = HW_RXBD_RING_SIZE;
4274 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4275 rmem->dma_arr = rxr->rx_desc_mapping;
4276 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4277 rmem->vmem = (void **)&rxr->rx_buf_ring;
4278
4279 ring = &rxr->rx_agg_ring_struct;
4280 rmem = &ring->ring_mem;
4281 rmem->nr_pages = bp->rx_agg_nr_pages;
4282 rmem->page_size = HW_RXBD_RING_SIZE;
4283 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4284 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4285 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4286 rmem->vmem = (void **)&rxr->rx_agg_ring;
4287 }
4288
4289 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4290 struct bnxt_rx_ring_info *rxr)
4291 {
4292 struct bnxt_ring_mem_info *rmem;
4293 struct bnxt_ring_struct *ring;
4294 int i;
4295
4296 rxr->page_pool->p.napi = NULL;
4297 rxr->page_pool = NULL;
4298 rxr->head_pool->p.napi = NULL;
4299 rxr->head_pool = NULL;
4300 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4301
4302 ring = &rxr->rx_ring_struct;
4303 rmem = &ring->ring_mem;
4304 rmem->pg_tbl = NULL;
4305 rmem->pg_tbl_map = 0;
4306 for (i = 0; i < rmem->nr_pages; i++) {
4307 rmem->pg_arr[i] = NULL;
4308 rmem->dma_arr[i] = 0;
4309 }
4310 *rmem->vmem = NULL;
4311
4312 ring = &rxr->rx_agg_ring_struct;
4313 rmem = &ring->ring_mem;
4314 rmem->pg_tbl = NULL;
4315 rmem->pg_tbl_map = 0;
4316 for (i = 0; i < rmem->nr_pages; i++) {
4317 rmem->pg_arr[i] = NULL;
4318 rmem->dma_arr[i] = 0;
4319 }
4320 *rmem->vmem = NULL;
4321 }
4322
4323 static void bnxt_init_ring_struct(struct bnxt *bp)
4324 {
4325 int i, j;
4326
4327 for (i = 0; i < bp->cp_nr_rings; i++) {
4328 struct bnxt_napi *bnapi = bp->bnapi[i];
4329 struct netdev_queue_config qcfg;
4330 struct bnxt_ring_mem_info *rmem;
4331 struct bnxt_cp_ring_info *cpr;
4332 struct bnxt_rx_ring_info *rxr;
4333 struct bnxt_tx_ring_info *txr;
4334 struct bnxt_ring_struct *ring;
4335
4336 if (!bnapi)
4337 continue;
4338
4339 cpr = &bnapi->cp_ring;
4340 ring = &cpr->cp_ring_struct;
4341 rmem = &ring->ring_mem;
4342 rmem->nr_pages = bp->cp_nr_pages;
4343 rmem->page_size = HW_CMPD_RING_SIZE;
4344 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4345 rmem->dma_arr = cpr->cp_desc_mapping;
4346 rmem->vmem_size = 0;
4347
4348 rxr = bnapi->rx_ring;
4349 if (!rxr)
4350 goto skip_rx;
4351
4352 netdev_queue_config(bp->dev, i, &qcfg);
4353 rxr->rx_page_size = qcfg.rx_page_size;
4354
4355 ring = &rxr->rx_ring_struct;
4356 rmem = &ring->ring_mem;
4357 rmem->nr_pages = bp->rx_nr_pages;
4358 rmem->page_size = HW_RXBD_RING_SIZE;
4359 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4360 rmem->dma_arr = rxr->rx_desc_mapping;
4361 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4362 rmem->vmem = (void **)&rxr->rx_buf_ring;
4363
4364 ring = &rxr->rx_agg_ring_struct;
4365 rmem = &ring->ring_mem;
4366 rmem->nr_pages = bp->rx_agg_nr_pages;
4367 rmem->page_size = HW_RXBD_RING_SIZE;
4368 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4369 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4370 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4371 rmem->vmem = (void **)&rxr->rx_agg_ring;
4372
4373 skip_rx:
4374 bnxt_for_each_napi_tx(j, bnapi, txr) {
4375 ring = &txr->tx_ring_struct;
4376 rmem = &ring->ring_mem;
4377 rmem->nr_pages = bp->tx_nr_pages;
4378 rmem->page_size = HW_TXBD_RING_SIZE;
4379 rmem->pg_arr = (void **)txr->tx_desc_ring;
4380 rmem->dma_arr = txr->tx_desc_mapping;
4381 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4382 rmem->vmem = (void **)&txr->tx_buf_ring;
4383 }
4384 }
4385 }
4386
4387 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4388 {
4389 int i;
4390 u32 prod;
4391 struct rx_bd **rx_buf_ring;
4392
4393 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4394 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4395 int j;
4396 struct rx_bd *rxbd;
4397
4398 rxbd = rx_buf_ring[i];
4399 if (!rxbd)
4400 continue;
4401
4402 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4403 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4404 rxbd->rx_bd_opaque = prod;
4405 }
4406 }
4407 }
4408
4409 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4410 struct bnxt_rx_ring_info *rxr,
4411 int ring_nr)
4412 {
4413 u32 prod;
4414 int i;
4415
4416 prod = rxr->rx_prod;
4417 for (i = 0; i < bp->rx_ring_size; i++) {
4418 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4419 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4420 ring_nr, i, bp->rx_ring_size);
4421 break;
4422 }
4423 prod = NEXT_RX(prod);
4424 }
4425 rxr->rx_prod = prod;
4426 }
4427
4428 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4429 struct bnxt_rx_ring_info *rxr,
4430 int ring_nr)
4431 {
4432 int fill_level, i;
4433 u32 prod;
4434
4435 fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4436
4437 prod = rxr->rx_agg_prod;
4438 for (i = 0; i < fill_level; i++) {
4439 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4440 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4441 ring_nr, i, bp->rx_agg_ring_size);
4442 break;
4443 }
4444 prod = NEXT_RX_AGG(prod);
4445 }
4446 rxr->rx_agg_prod = prod;
4447 }
4448
4449 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4450 struct bnxt_rx_ring_info *rxr)
4451 {
4452 dma_addr_t mapping;
4453 u8 *data;
4454 int i;
4455
4456 for (i = 0; i < bp->max_tpa; i++) {
4457 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4458 GFP_KERNEL);
4459 if (!data)
4460 return -ENOMEM;
4461
4462 rxr->rx_tpa[i].data = data;
4463 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4464 rxr->rx_tpa[i].mapping = mapping;
4465 }
4466
4467 return 0;
4468 }
4469
4470 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4471 {
4472 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4473 int rc;
4474
4475 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4476
4477 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4478 return 0;
4479
4480 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4481
4482 if (rxr->rx_tpa) {
4483 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4484 if (rc)
4485 return rc;
4486 }
4487 return 0;
4488 }
4489
4490 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4491 struct bnxt_rx_ring_info *rxr)
4492 {
4493 struct bnxt_ring_struct *ring;
4494 u32 type;
4495
4496 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4497 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4498
4499 if (NET_IP_ALIGN == 2)
4500 type |= RX_BD_FLAGS_SOP;
4501
4502 ring = &rxr->rx_ring_struct;
4503 bnxt_init_rxbd_pages(ring, type);
4504 ring->fw_ring_id = INVALID_HW_RING_ID;
4505 }
4506
4507 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4508 struct bnxt_rx_ring_info *rxr)
4509 {
4510 struct bnxt_ring_struct *ring;
4511 u32 type;
4512
4513 ring = &rxr->rx_agg_ring_struct;
4514 ring->fw_ring_id = INVALID_HW_RING_ID;
4515 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4516 type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4517 RX_BD_TYPE_RX_AGG_BD;
4518
4519 /* On P7, setting EOP will cause the chip to disable
4520 * Relaxed Ordering (RO) for TPA data. Disable EOP for
4521 * potentially higher performance with RO.
4522 */
4523 if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4524 type |= RX_BD_FLAGS_AGG_EOP;
4525
4526 bnxt_init_rxbd_pages(ring, type);
4527 }
4528 }
4529
4530 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4531 {
4532 struct bnxt_rx_ring_info *rxr;
4533
4534 rxr = &bp->rx_ring[ring_nr];
4535 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4536
4537 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4538 &rxr->bnapi->napi);
4539
4540 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4541 bpf_prog_add(bp->xdp_prog, 1);
4542 rxr->xdp_prog = bp->xdp_prog;
4543 }
4544
4545 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4546
4547 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4548 }
4549
4550 static void bnxt_init_cp_rings(struct bnxt *bp)
4551 {
4552 int i, j;
4553
4554 for (i = 0; i < bp->cp_nr_rings; i++) {
4555 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4556 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4557
4558 ring->fw_ring_id = INVALID_HW_RING_ID;
4559 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4560 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4561 if (!cpr->cp_ring_arr)
4562 continue;
4563 for (j = 0; j < cpr->cp_ring_count; j++) {
4564 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4565
4566 ring = &cpr2->cp_ring_struct;
4567 ring->fw_ring_id = INVALID_HW_RING_ID;
4568 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4569 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4570 }
4571 }
4572 }
4573
4574 static int bnxt_init_rx_rings(struct bnxt *bp)
4575 {
4576 int i, rc = 0;
4577
4578 if (BNXT_RX_PAGE_MODE(bp)) {
4579 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4580 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4581 } else {
4582 bp->rx_offset = BNXT_RX_OFFSET;
4583 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4584 }
4585
4586 for (i = 0; i < bp->rx_nr_rings; i++) {
4587 rc = bnxt_init_one_rx_ring(bp, i);
4588 if (rc)
4589 break;
4590 }
4591
4592 return rc;
4593 }
4594
4595 static int bnxt_init_tx_rings(struct bnxt *bp)
4596 {
4597 u16 i;
4598
4599 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4600 BNXT_MIN_TX_DESC_CNT);
4601
4602 for (i = 0; i < bp->tx_nr_rings; i++) {
4603 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4604 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4605
4606 ring->fw_ring_id = INVALID_HW_RING_ID;
4607
4608 if (i >= bp->tx_nr_rings_xdp)
4609 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4610 NETDEV_QUEUE_TYPE_TX,
4611 &txr->bnapi->napi);
4612 }
4613
4614 return 0;
4615 }
4616
4617 static void bnxt_free_ring_grps(struct bnxt *bp)
4618 {
4619 kfree(bp->grp_info);
4620 bp->grp_info = NULL;
4621 }
4622
4623 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4624 {
4625 int i;
4626
4627 if (irq_re_init) {
4628 bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4629 bp->cp_nr_rings);
4630 if (!bp->grp_info)
4631 return -ENOMEM;
4632 }
4633 for (i = 0; i < bp->cp_nr_rings; i++) {
4634 if (irq_re_init)
4635 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4636 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4637 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4638 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4639 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4640 }
4641 return 0;
4642 }
4643
4644 static void bnxt_free_vnics(struct bnxt *bp)
4645 {
4646 kfree(bp->vnic_info);
4647 bp->vnic_info = NULL;
4648 bp->nr_vnics = 0;
4649 }
4650
4651 static int bnxt_alloc_vnics(struct bnxt *bp)
4652 {
4653 int num_vnics = 1;
4654
4655 #ifdef CONFIG_RFS_ACCEL
4656 if (bp->flags & BNXT_FLAG_RFS) {
4657 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4658 num_vnics++;
4659 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4660 num_vnics += bp->rx_nr_rings;
4661 }
4662 #endif
4663
4664 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4665 num_vnics++;
4666
4667 bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4668 if (!bp->vnic_info)
4669 return -ENOMEM;
4670
4671 bp->nr_vnics = num_vnics;
4672 return 0;
4673 }
4674
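/* Reset every VNIC to invalid HW ids and set up its RSS hash key.  The
 * default VNIC owns the key: a random one is generated unless the user
 * supplied a key, and its first 8 bytes are folded into
 * bp->toeplitz_prefix for later software hash computation.  The other
 * VNICs simply copy the default VNIC's key.
 */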
4675 static void bnxt_init_vnics(struct bnxt *bp)
4676 {
4677 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4678 int i;
4679
4680 for (i = 0; i < bp->nr_vnics; i++) {
4681 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4682 int j;
4683
4684 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4685 vnic->vnic_id = i;
4686 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4687 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4688
4689 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4690
4691 if (bp->vnic_info[i].rss_hash_key) {
4692 if (i == BNXT_VNIC_DEFAULT) {
4693 u8 *key = (void *)vnic->rss_hash_key;
4694 int k;
4695
4696 if (!bp->rss_hash_key_valid &&
4697 !bp->rss_hash_key_updated) {
4698 get_random_bytes(bp->rss_hash_key,
4699 HW_HASH_KEY_SIZE);
4700 bp->rss_hash_key_updated = true;
4701 }
4702
4703 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4704 HW_HASH_KEY_SIZE);
4705
4706 if (!bp->rss_hash_key_updated)
4707 continue;
4708
4709 bp->rss_hash_key_updated = false;
4710 bp->rss_hash_key_valid = true;
4711
4712 bp->toeplitz_prefix = 0;
4713 for (k = 0; k < 8; k++) {
4714 bp->toeplitz_prefix <<= 8;
4715 bp->toeplitz_prefix |= key[k];
4716 }
4717 } else {
4718 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4719 HW_HASH_KEY_SIZE);
4720 }
4721 }
4722 }
4723 }
4724
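/* Number of HW ring pages needed to hold @ring_size descriptors,
 * rounded up to a power of two.  Example: ring_size = 511 with
 * desc_per_pg = 256 gives pages = 1, then the increment and the
 * power-of-two round-up below yield 2 pages (512 descriptor slots).
 */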
4725 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4726 {
4727 int pages;
4728
4729 pages = ring_size / desc_per_pg;
4730
4731 if (!pages)
4732 return 1;
4733
4734 pages++;
4735
4736 while (pages & (pages - 1))
4737 pages++;
4738
4739 return pages;
4740 }
4741
4742 void bnxt_set_tpa_flags(struct bnxt *bp)
4743 {
4744 bp->flags &= ~BNXT_FLAG_TPA;
4745 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4746 return;
4747 if (bp->dev->features & NETIF_F_LRO)
4748 bp->flags |= BNXT_FLAG_LRO;
4749 else if (bp->dev->features & NETIF_F_GRO_HW)
4750 bp->flags |= BNXT_FLAG_GRO;
4751 }
4752
4753 static void bnxt_init_ring_params(struct bnxt *bp)
4754 {
4755 unsigned int rx_size;
4756
4757 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4758 /* Try to fit 4 chunks into a 4k page */
4759 rx_size = SZ_1K -
4760 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4761 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4762 }
4763
4764 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4765 * be set on entry.
4766 */
4767 void bnxt_set_ring_params(struct bnxt *bp)
4768 {
4769 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4770 u32 agg_factor = 0, agg_ring_size = 0;
4771
4772 /* 8 for CRC and VLAN */
4773 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4774
4775 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4777
4778 ring_size = bp->rx_ring_size;
4779 bp->rx_agg_ring_size = 0;
4780 bp->rx_agg_nr_pages = 0;
4781
4782 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4783 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4784
4785 bp->flags &= ~BNXT_FLAG_JUMBO;
4786 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4787 u32 jumbo_factor;
4788
4789 bp->flags |= BNXT_FLAG_JUMBO;
4790 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4791 if (jumbo_factor > agg_factor)
4792 agg_factor = jumbo_factor;
4793 }
4794 if (agg_factor) {
4795 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4796 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4797 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4798 bp->rx_ring_size, ring_size);
4799 bp->rx_ring_size = ring_size;
4800 }
4801 agg_ring_size = ring_size * agg_factor;
4802
4803 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4804 RX_DESC_CNT);
4805 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4806 u32 tmp = agg_ring_size;
4807
4808 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4809 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4810 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4811 tmp, agg_ring_size);
4812 }
4813 bp->rx_agg_ring_size = agg_ring_size;
4814 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4815
4816 if (BNXT_RX_PAGE_MODE(bp)) {
4817 rx_space = PAGE_SIZE;
4818 rx_size = PAGE_SIZE -
4819 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4820 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4821 } else {
4822 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4823 bp->rx_copybreak,
4824 bp->dev->cfg_pending->hds_thresh);
4825 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4826 rx_space = rx_size + NET_SKB_PAD +
4827 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4828 }
4829 }
4830
4831 bp->rx_buf_use_size = rx_size;
4832 bp->rx_buf_size = rx_space;
4833
4834 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4835 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4836
4837 ring_size = bp->tx_ring_size;
4838 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4839 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4840
4841 max_rx_cmpl = bp->rx_ring_size;
4842 /* MAX TPA needs to be added because TPA_START completions are
4843 * immediately recycled, so the TPA completions are not bound by
4844 * the RX ring size.
4845 */
4846 if (bp->flags & BNXT_FLAG_TPA)
4847 max_rx_cmpl += bp->max_tpa;
4848 /* RX and TPA completions are 32-byte, all others are 16-byte */
4849 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4850 bp->cp_ring_size = ring_size;
4851
4852 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4853 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4854 bp->cp_nr_pages = MAX_CP_PAGES;
4855 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4856 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4857 ring_size, bp->cp_ring_size);
4858 }
4859 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4860 bp->cp_ring_mask = bp->cp_bit - 1;
4861 }
4862
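/* In page mode (used with XDP), RX buffers are page pool pages mapped
 * DMA_BIDIRECTIONAL so an XDP program may rewrite them, and the MTU is
 * capped at BNXT_MAX_PAGE_MODE_MTU unless the program supports frags.
 * In the normal mode, buffers are smaller frags mapped DMA_FROM_DEVICE
 * and turned into skbs by bnxt_rx_skb().
 */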
4863 /* Changing allocation mode of RX rings.
4864 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4865 */
4866 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4867 {
4868 struct net_device *dev = bp->dev;
4869
4870 if (page_mode) {
4871 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4872 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4873
4874 if (bp->xdp_prog->aux->xdp_has_frags)
4875 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4876 else
4877 dev->max_mtu =
4878 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4879 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4880 bp->flags |= BNXT_FLAG_JUMBO;
4881 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4882 } else {
4883 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4884 bp->rx_skb_func = bnxt_rx_page_skb;
4885 }
4886 bp->rx_dir = DMA_BIDIRECTIONAL;
4887 } else {
4888 dev->max_mtu = bp->max_mtu;
4889 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4890 bp->rx_dir = DMA_FROM_DEVICE;
4891 bp->rx_skb_func = bnxt_rx_skb;
4892 }
4893 }
4894
4895 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4896 {
4897 __bnxt_set_rx_skb_mode(bp, page_mode);
4898
4899 if (!page_mode) {
4900 int rx, tx;
4901
4902 bnxt_get_max_rings(bp, &rx, &tx, true);
4903 if (rx > 1) {
4904 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4905 bp->dev->hw_features |= NETIF_F_LRO;
4906 }
4907 }
4908
4909 /* Update LRO and GRO_HW availability */
4910 netdev_update_features(bp->dev);
4911 }
4912
4913 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4914 {
4915 int i;
4916 struct bnxt_vnic_info *vnic;
4917 struct pci_dev *pdev = bp->pdev;
4918
4919 if (!bp->vnic_info)
4920 return;
4921
4922 for (i = 0; i < bp->nr_vnics; i++) {
4923 vnic = &bp->vnic_info[i];
4924
4925 kfree(vnic->fw_grp_ids);
4926 vnic->fw_grp_ids = NULL;
4927
4928 kfree(vnic->uc_list);
4929 vnic->uc_list = NULL;
4930
4931 if (vnic->mc_list) {
4932 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4933 vnic->mc_list, vnic->mc_list_mapping);
4934 vnic->mc_list = NULL;
4935 }
4936
4937 if (vnic->rss_table) {
4938 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4939 vnic->rss_table,
4940 vnic->rss_table_dma_addr);
4941 vnic->rss_table = NULL;
4942 }
4943
4944 vnic->rss_hash_key = NULL;
4945 vnic->flags = 0;
4946 }
4947 }
4948
4949 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4950 {
4951 int i, rc = 0, size;
4952 struct bnxt_vnic_info *vnic;
4953 struct pci_dev *pdev = bp->pdev;
4954 int max_rings;
4955
4956 for (i = 0; i < bp->nr_vnics; i++) {
4957 vnic = &bp->vnic_info[i];
4958
4959 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4960 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4961
4962 if (mem_size > 0) {
4963 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4964 if (!vnic->uc_list) {
4965 rc = -ENOMEM;
4966 goto out;
4967 }
4968 }
4969 }
4970
4971 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4972 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4973 vnic->mc_list =
4974 dma_alloc_coherent(&pdev->dev,
4975 vnic->mc_list_size,
4976 &vnic->mc_list_mapping,
4977 GFP_KERNEL);
4978 if (!vnic->mc_list) {
4979 rc = -ENOMEM;
4980 goto out;
4981 }
4982 }
4983
4984 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4985 goto vnic_skip_grps;
4986
4987 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4988 max_rings = bp->rx_nr_rings;
4989 else
4990 max_rings = 1;
4991
4992 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4993 if (!vnic->fw_grp_ids) {
4994 rc = -ENOMEM;
4995 goto out;
4996 }
4997 vnic_skip_grps:
4998 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4999 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
5000 continue;
5001
5002 /* Allocate rss table and hash key */
5003 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5004 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5005 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5006
5007 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5008 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5009 vnic->rss_table_size,
5010 &vnic->rss_table_dma_addr,
5011 GFP_KERNEL);
5012 if (!vnic->rss_table) {
5013 rc = -ENOMEM;
5014 goto out;
5015 }
5016
5017 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5018 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5019 }
5020 return 0;
5021
5022 out:
5023 return rc;
5024 }
5025
5026 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5027 {
5028 struct bnxt_hwrm_wait_token *token;
5029
5030 dma_pool_destroy(bp->hwrm_dma_pool);
5031 bp->hwrm_dma_pool = NULL;
5032
5033 rcu_read_lock();
5034 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5035 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5036 rcu_read_unlock();
5037 }
5038
5039 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5040 {
5041 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5042 BNXT_HWRM_DMA_SIZE,
5043 BNXT_HWRM_DMA_ALIGN, 0);
5044 if (!bp->hwrm_dma_pool)
5045 return -ENOMEM;
5046
5047 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5048
5049 return 0;
5050 }
5051
5052 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5053 {
5054 kfree(stats->hw_masks);
5055 stats->hw_masks = NULL;
5056 kfree(stats->sw_stats);
5057 stats->sw_stats = NULL;
5058 if (stats->hw_stats) {
5059 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5060 stats->hw_stats_map);
5061 stats->hw_stats = NULL;
5062 }
5063 }
5064
5065 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5066 bool alloc_masks)
5067 {
5068 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5069 &stats->hw_stats_map, GFP_KERNEL);
5070 if (!stats->hw_stats)
5071 return -ENOMEM;
5072
5073 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5074 if (!stats->sw_stats)
5075 goto stats_mem_err;
5076
5077 if (alloc_masks) {
5078 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5079 if (!stats->hw_masks)
5080 goto stats_mem_err;
5081 }
5082 return 0;
5083
5084 stats_mem_err:
5085 bnxt_free_stats_mem(bp, stats);
5086 return -ENOMEM;
5087 }
5088
5089 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5090 {
5091 int i;
5092
5093 for (i = 0; i < count; i++)
5094 mask_arr[i] = mask;
5095 }
5096
5097 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5098 {
5099 int i;
5100
5101 for (i = 0; i < count; i++)
5102 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5103 }
5104
5105 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5106 struct bnxt_stats_mem *stats)
5107 {
5108 struct hwrm_func_qstats_ext_output *resp;
5109 struct hwrm_func_qstats_ext_input *req;
5110 __le64 *hw_masks;
5111 int rc;
5112
5113 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5114 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5115 return -EOPNOTSUPP;
5116
5117 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5118 if (rc)
5119 return rc;
5120
5121 req->fid = cpu_to_le16(0xffff);
5122 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5123
5124 resp = hwrm_req_hold(bp, req);
5125 rc = hwrm_req_send(bp, req);
5126 if (!rc) {
5127 hw_masks = &resp->rx_ucast_pkts;
5128 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5129 }
5130 hwrm_req_drop(bp, req);
5131 return rc;
5132 }
5133
5134 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5135 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5136
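/* Learn the width of each HW counter so that overflow can be handled
 * when accumulating into 64-bit SW counters.  The masks are queried
 * from the FW via the counter-mask flavors of the qstats calls; if
 * that fails, conservative defaults are assumed: 48-bit ring counters
 * on P5+ chips (mask = (1ULL << 48) - 1), full 64-bit ones otherwise,
 * and 40-bit port counters.
 */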
5137 static void bnxt_init_stats(struct bnxt *bp)
5138 {
5139 struct bnxt_napi *bnapi = bp->bnapi[0];
5140 struct bnxt_cp_ring_info *cpr;
5141 struct bnxt_stats_mem *stats;
5142 __le64 *rx_stats, *tx_stats;
5143 int rc, rx_count, tx_count;
5144 u64 *rx_masks, *tx_masks;
5145 u64 mask;
5146 u8 flags;
5147
5148 cpr = &bnapi->cp_ring;
5149 stats = &cpr->stats;
5150 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5151 if (rc) {
5152 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5153 mask = (1ULL << 48) - 1;
5154 else
5155 mask = -1ULL;
5156 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5157 }
5158 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5159 stats = &bp->port_stats;
5160 rx_stats = stats->hw_stats;
5161 rx_masks = stats->hw_masks;
5162 rx_count = sizeof(struct rx_port_stats) / 8;
5163 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5164 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5165 tx_count = sizeof(struct tx_port_stats) / 8;
5166
5167 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5168 rc = bnxt_hwrm_port_qstats(bp, flags);
5169 if (rc) {
5170 mask = (1ULL << 40) - 1;
5171
5172 bnxt_fill_masks(rx_masks, mask, rx_count);
5173 bnxt_fill_masks(tx_masks, mask, tx_count);
5174 } else {
5175 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5176 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5177 bnxt_hwrm_port_qstats(bp, 0);
5178 }
5179 }
5180 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5181 stats = &bp->rx_port_stats_ext;
5182 rx_stats = stats->hw_stats;
5183 rx_masks = stats->hw_masks;
5184 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5185 stats = &bp->tx_port_stats_ext;
5186 tx_stats = stats->hw_stats;
5187 tx_masks = stats->hw_masks;
5188 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5189
5190 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5191 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5192 if (rc) {
5193 mask = (1ULL << 40) - 1;
5194
5195 bnxt_fill_masks(rx_masks, mask, rx_count);
5196 if (tx_stats)
5197 bnxt_fill_masks(tx_masks, mask, tx_count);
5198 } else {
5199 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5200 if (tx_stats)
5201 bnxt_copy_hw_masks(tx_masks, tx_stats,
5202 tx_count);
5203 bnxt_hwrm_port_qstats_ext(bp, 0);
5204 }
5205 }
5206 }
5207
5208 static void bnxt_free_port_stats(struct bnxt *bp)
5209 {
5210 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5211 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5212
5213 bnxt_free_stats_mem(bp, &bp->port_stats);
5214 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5215 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5216 }
5217
5218 static void bnxt_free_ring_stats(struct bnxt *bp)
5219 {
5220 int i;
5221
5222 if (!bp->bnapi)
5223 return;
5224
5225 for (i = 0; i < bp->cp_nr_rings; i++) {
5226 struct bnxt_napi *bnapi = bp->bnapi[i];
5227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5228
5229 bnxt_free_stats_mem(bp, &cpr->stats);
5230
5231 kfree(cpr->sw_stats);
5232 cpr->sw_stats = NULL;
5233 }
5234 }
5235
5236 static int bnxt_alloc_stats(struct bnxt *bp)
5237 {
5238 u32 size, i;
5239 int rc;
5240
5241 size = bp->hw_ring_stats_size;
5242
5243 for (i = 0; i < bp->cp_nr_rings; i++) {
5244 struct bnxt_napi *bnapi = bp->bnapi[i];
5245 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5246
5247 cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5248 if (!cpr->sw_stats)
5249 return -ENOMEM;
5250
5251 cpr->stats.len = size;
5252 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5253 if (rc)
5254 return rc;
5255
5256 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5257 }
5258
5259 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5260 return 0;
5261
5262 if (bp->port_stats.hw_stats)
5263 goto alloc_ext_stats;
5264
5265 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5266 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5267 if (rc)
5268 return rc;
5269
5270 bp->flags |= BNXT_FLAG_PORT_STATS;
5271
5272 alloc_ext_stats:
5273 /* Display extended statistics only if the FW supports them */
5274 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5275 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5276 return 0;
5277
5278 if (bp->rx_port_stats_ext.hw_stats)
5279 goto alloc_tx_ext_stats;
5280
5281 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5282 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5283 /* Extended stats are optional */
5284 if (rc)
5285 return 0;
5286
5287 alloc_tx_ext_stats:
5288 if (bp->tx_port_stats_ext.hw_stats)
5289 return 0;
5290
5291 if (bp->hwrm_spec_code >= 0x10902 ||
5292 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5293 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5294 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5295 /* Extended stats are optional */
5296 if (rc)
5297 return 0;
5298 }
5299 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5300 return 0;
5301 }
5302
5303 static void bnxt_clear_ring_indices(struct bnxt *bp)
5304 {
5305 int i, j;
5306
5307 if (!bp->bnapi)
5308 return;
5309
5310 for (i = 0; i < bp->cp_nr_rings; i++) {
5311 struct bnxt_napi *bnapi = bp->bnapi[i];
5312 struct bnxt_cp_ring_info *cpr;
5313 struct bnxt_rx_ring_info *rxr;
5314 struct bnxt_tx_ring_info *txr;
5315
5316 if (!bnapi)
5317 continue;
5318
5319 cpr = &bnapi->cp_ring;
5320 cpr->cp_raw_cons = 0;
5321
5322 bnxt_for_each_napi_tx(j, bnapi, txr) {
5323 txr->tx_prod = 0;
5324 txr->tx_cons = 0;
5325 txr->tx_hw_cons = 0;
5326 }
5327
5328 rxr = bnapi->rx_ring;
5329 if (rxr) {
5330 rxr->rx_prod = 0;
5331 rxr->rx_agg_prod = 0;
5332 rxr->rx_sw_agg_prod = 0;
5333 rxr->rx_next_cons = 0;
5334 }
5335 bnapi->events = 0;
5336 }
5337 }
5338
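/* Only user-created filters are tracked on the usr_fltr_list: L2 filters
 * that steer to a specific ring and ntuple filters marked no-aging
 * (typically those added explicitly, e.g. via ethtool, rather than learned
 * and aged out by aRFS).
 */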
5339 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5340 {
5341 u8 type = fltr->type, flags = fltr->flags;
5342
5343 INIT_LIST_HEAD(&fltr->list);
5344 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5345 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5346 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5347 }
5348
5349 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5350 {
5351 if (!list_empty(&fltr->list))
5352 list_del_init(&fltr->list);
5353 }
5354
5355 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5356 {
5357 struct bnxt_filter_base *usr_fltr, *tmp;
5358
5359 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5360 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5361 continue;
5362 bnxt_del_one_usr_fltr(bp, usr_fltr);
5363 }
5364 }
5365
5366 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5367 {
5368 hlist_del(&fltr->hash);
5369 bnxt_del_one_usr_fltr(bp, fltr);
5370 if (fltr->flags) {
5371 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5372 bp->ntp_fltr_count--;
5373 }
5374 kfree(fltr);
5375 }
5376
5377 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5378 {
5379 int i;
5380
5381 netdev_assert_locked_or_invisible(bp->dev);
5382
5383 /* We are under the netdev instance lock and all our NAPIs have been
5384 * disabled.  It's safe to delete the hash table.
5385 */
5386 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5387 struct hlist_head *head;
5388 struct hlist_node *tmp;
5389 struct bnxt_ntuple_filter *fltr;
5390
5391 head = &bp->ntp_fltr_hash_tbl[i];
5392 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5393 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5394 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5395 !list_empty(&fltr->base.list)))
5396 continue;
5397 bnxt_del_fltr(bp, &fltr->base);
5398 }
5399 }
5400 if (!all)
5401 return;
5402
5403 bitmap_free(bp->ntp_fltr_bmap);
5404 bp->ntp_fltr_bmap = NULL;
5405 bp->ntp_fltr_count = 0;
5406 }
5407
5408 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5409 {
5410 int i, rc = 0;
5411
5412 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5413 return 0;
5414
5415 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5416 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5417
5418 bp->ntp_fltr_count = 0;
5419 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5420
5421 if (!bp->ntp_fltr_bmap)
5422 rc = -ENOMEM;
5423
5424 return rc;
5425 }
5426
5427 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5428 {
5429 int i;
5430
5431 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5432 struct hlist_head *head;
5433 struct hlist_node *tmp;
5434 struct bnxt_l2_filter *fltr;
5435
5436 head = &bp->l2_fltr_hash_tbl[i];
5437 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5438 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5439 !list_empty(&fltr->base.list)))
5440 continue;
5441 bnxt_del_fltr(bp, &fltr->base);
5442 }
5443 }
5444 }
5445
5446 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5447 {
5448 int i;
5449
5450 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5451 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5452 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5453 }
5454
5455 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5456 {
5457 bnxt_free_vnic_attributes(bp);
5458 bnxt_free_tx_rings(bp);
5459 bnxt_free_rx_rings(bp);
5460 bnxt_free_cp_rings(bp);
5461 bnxt_free_all_cp_arrays(bp);
5462 bnxt_free_ntp_fltrs(bp, false);
5463 bnxt_free_l2_filters(bp, false);
5464 if (irq_re_init) {
5465 bnxt_free_ring_stats(bp);
5466 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5467 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5468 bnxt_free_port_stats(bp);
5469 bnxt_free_ring_grps(bp);
5470 bnxt_free_vnics(bp);
5471 kfree(bp->tx_ring_map);
5472 bp->tx_ring_map = NULL;
5473 kfree(bp->tx_ring);
5474 bp->tx_ring = NULL;
5475 kfree(bp->rx_ring);
5476 bp->rx_ring = NULL;
5477 kfree(bp->bnapi);
5478 bp->bnapi = NULL;
5479 } else {
5480 bnxt_clear_ring_indices(bp);
5481 }
5482 }
5483
5484 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5485 {
5486 int i, j, rc, size, arr_size;
5487 void *bnapi;
5488
5489 if (irq_re_init) {
5490 /* Allocate bnapi mem pointer array and mem block for
5491 * all queues
5492 */
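/* Layout of the single allocation:
 *   [bnxt_napi pointer array][bnxt_napi 0][bnxt_napi 1]...
 * with both the pointer array and each struct padded to a cache line
 * via L1_CACHE_ALIGN() below.
 */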
5493 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5494 bp->cp_nr_rings);
5495 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5496 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5497 if (!bnapi)
5498 return -ENOMEM;
5499
5500 bp->bnapi = bnapi;
5501 bnapi += arr_size;
5502 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5503 bp->bnapi[i] = bnapi;
5504 bp->bnapi[i]->index = i;
5505 bp->bnapi[i]->bp = bp;
5506 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5507 struct bnxt_cp_ring_info *cpr =
5508 &bp->bnapi[i]->cp_ring;
5509
5510 cpr->cp_ring_struct.ring_mem.flags =
5511 BNXT_RMEM_RING_PTE_FLAG;
5512 }
5513 }
5514
5515 bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
5516 bp->rx_nr_rings);
5517 if (!bp->rx_ring)
5518 return -ENOMEM;
5519
5520 for (i = 0; i < bp->rx_nr_rings; i++) {
5521 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5522
5523 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5524 rxr->rx_ring_struct.ring_mem.flags =
5525 BNXT_RMEM_RING_PTE_FLAG;
5526 rxr->rx_agg_ring_struct.ring_mem.flags =
5527 BNXT_RMEM_RING_PTE_FLAG;
5528 } else {
5529 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5530 }
5531 rxr->bnapi = bp->bnapi[i];
5532 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5533 }
5534
5535 bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
5536 bp->tx_nr_rings);
5537 if (!bp->tx_ring)
5538 return -ENOMEM;
5539
5540 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5541 GFP_KERNEL);
5542
5543 if (!bp->tx_ring_map)
5544 return -ENOMEM;
5545
5546 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5547 j = 0;
5548 else
5549 j = bp->rx_nr_rings;
5550
5551 for (i = 0; i < bp->tx_nr_rings; i++) {
5552 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5553 struct bnxt_napi *bnapi2;
5554
5555 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5556 txr->tx_ring_struct.ring_mem.flags =
5557 BNXT_RMEM_RING_PTE_FLAG;
5558 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5559 if (i >= bp->tx_nr_rings_xdp) {
5560 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5561
5562 bnapi2 = bp->bnapi[k];
5563 txr->txq_index = i - bp->tx_nr_rings_xdp;
5564 txr->tx_napi_idx =
5565 BNXT_RING_TO_TC(bp, txr->txq_index);
5566 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5567 bnapi2->tx_int = bnxt_tx_int;
5568 } else {
5569 bnapi2 = bp->bnapi[j];
5570 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5571 bnapi2->tx_ring[0] = txr;
5572 bnapi2->tx_int = bnxt_tx_int_xdp;
5573 j++;
5574 }
5575 txr->bnapi = bnapi2;
5576 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5577 txr->tx_cpr = &bnapi2->cp_ring;
5578 }
5579
5580 rc = bnxt_alloc_stats(bp);
5581 if (rc)
5582 goto alloc_mem_err;
5583 bnxt_init_stats(bp);
5584
5585 rc = bnxt_alloc_ntp_fltrs(bp);
5586 if (rc)
5587 goto alloc_mem_err;
5588
5589 rc = bnxt_alloc_vnics(bp);
5590 if (rc)
5591 goto alloc_mem_err;
5592 }
5593
5594 rc = bnxt_alloc_all_cp_arrays(bp);
5595 if (rc)
5596 goto alloc_mem_err;
5597
5598 bnxt_init_ring_struct(bp);
5599
5600 rc = bnxt_alloc_rx_rings(bp);
5601 if (rc)
5602 goto alloc_mem_err;
5603
5604 rc = bnxt_alloc_tx_rings(bp);
5605 if (rc)
5606 goto alloc_mem_err;
5607
5608 rc = bnxt_alloc_cp_rings(bp);
5609 if (rc)
5610 goto alloc_mem_err;
5611
5612 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5613 BNXT_VNIC_MCAST_FLAG |
5614 BNXT_VNIC_UCAST_FLAG;
5615 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5616 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5617 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5618
5619 rc = bnxt_alloc_vnic_attributes(bp);
5620 if (rc)
5621 goto alloc_mem_err;
5622 return 0;
5623
5624 alloc_mem_err:
5625 bnxt_free_mem(bp, true);
5626 return rc;
5627 }
5628
5629 static void bnxt_disable_int(struct bnxt *bp)
5630 {
5631 int i;
5632
5633 if (!bp->bnapi)
5634 return;
5635
5636 for (i = 0; i < bp->cp_nr_rings; i++) {
5637 struct bnxt_napi *bnapi = bp->bnapi[i];
5638 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5639 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5640
5641 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5642 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5643 }
5644 }
5645
5646 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5647 {
5648 struct bnxt_napi *bnapi = bp->bnapi[n];
5649 struct bnxt_cp_ring_info *cpr;
5650
5651 cpr = &bnapi->cp_ring;
5652 return cpr->cp_ring_struct.map_idx;
5653 }
5654
5655 static void bnxt_disable_int_sync(struct bnxt *bp)
5656 {
5657 int i;
5658
5659 if (!bp->irq_tbl)
5660 return;
5661
5662 atomic_inc(&bp->intr_sem);
5663
5664 bnxt_disable_int(bp);
5665 for (i = 0; i < bp->cp_nr_rings; i++) {
5666 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5667
5668 synchronize_irq(bp->irq_tbl[map_idx].vector);
5669 }
5670 }
5671
5672 static void bnxt_enable_int(struct bnxt *bp)
5673 {
5674 int i;
5675
5676 atomic_set(&bp->intr_sem, 0);
5677 for (i = 0; i < bp->cp_nr_rings; i++) {
5678 struct bnxt_napi *bnapi = bp->bnapi[i];
5679 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5680
5681 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5682 }
5683 }
5684
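/* Register the driver with the firmware.  This advertises the OS type and
 * driver version, optional capabilities (hot reset, error recovery, NPAR 1.2),
 * the set of HWRM commands that VFs may forward to the PF, and the async
 * event IDs the firmware should forward to the driver.
 */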
5685 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5686 bool async_only)
5687 {
5688 DECLARE_BITMAP(async_events_bmap, 256);
5689 u32 *events = (u32 *)async_events_bmap;
5690 struct hwrm_func_drv_rgtr_output *resp;
5691 struct hwrm_func_drv_rgtr_input *req;
5692 u32 flags;
5693 int rc, i;
5694
5695 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5696 if (rc)
5697 return rc;
5698
5699 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5700 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5701 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5702
5703 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5704 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5705 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5706 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5707 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5708 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5709 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5710 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5711 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5712 req->flags = cpu_to_le32(flags);
5713 req->ver_maj_8b = DRV_VER_MAJ;
5714 req->ver_min_8b = DRV_VER_MIN;
5715 req->ver_upd_8b = DRV_VER_UPD;
5716 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5717 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5718 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5719
5720 if (BNXT_PF(bp)) {
5721 u32 data[8];
5722 int i;
5723
5724 memset(data, 0, sizeof(data));
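/* Each HWRM command ID in bnxt_vf_req_snif sets one bit in the 256-bit
 * vf_req_fwd bitmap: word index = cmd / 32, bit within word = cmd % 32.
 */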
5725 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5726 u16 cmd = bnxt_vf_req_snif[i];
5727 unsigned int bit, idx;
5728
5729 if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5730 cmd == HWRM_PORT_PHY_QCFG)
5731 continue;
5732
5733 idx = cmd / 32;
5734 bit = cmd % 32;
5735 data[idx] |= 1 << bit;
5736 }
5737
5738 for (i = 0; i < 8; i++)
5739 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5740
5741 req->enables |=
5742 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5743 }
5744
5745 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5746 req->flags |= cpu_to_le32(
5747 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5748
5749 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5750 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5751 u16 event_id = bnxt_async_events_arr[i];
5752
5753 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5754 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5755 continue;
5756 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5757 !bp->ptp_cfg)
5758 continue;
5759 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5760 }
5761 if (bmap && bmap_size) {
5762 for (i = 0; i < bmap_size; i++) {
5763 if (test_bit(i, bmap))
5764 __set_bit(i, async_events_bmap);
5765 }
5766 }
5767 for (i = 0; i < 8; i++)
5768 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5769
5770 if (async_only)
5771 req->enables =
5772 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5773
5774 resp = hwrm_req_hold(bp, req);
5775 rc = hwrm_req_send(bp, req);
5776 if (!rc) {
5777 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5778 if (resp->flags &
5779 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5780 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5781 }
5782 hwrm_req_drop(bp, req);
5783 return rc;
5784 }
5785
5786 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5787 {
5788 struct hwrm_func_drv_unrgtr_input *req;
5789 int rc;
5790
5791 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5792 return 0;
5793
5794 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5795 if (rc)
5796 return rc;
5797 return hwrm_req_send(bp, req);
5798 }
5799
5800 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5801
5802 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5803 {
5804 struct hwrm_tunnel_dst_port_free_input *req;
5805 int rc;
5806
5807 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5808 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5809 return 0;
5810 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5811 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5812 return 0;
5813
5814 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5815 if (rc)
5816 return rc;
5817
5818 req->tunnel_type = tunnel_type;
5819
5820 switch (tunnel_type) {
5821 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5822 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5823 bp->vxlan_port = 0;
5824 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5825 break;
5826 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5827 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5828 bp->nge_port = 0;
5829 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5830 break;
5831 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5832 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5833 bp->vxlan_gpe_port = 0;
5834 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5835 break;
5836 default:
5837 break;
5838 }
5839
5840 rc = hwrm_req_send(bp, req);
5841 if (rc)
5842 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5843 rc);
5844 if (bp->flags & BNXT_FLAG_TPA)
5845 bnxt_set_tpa(bp, true);
5846 return rc;
5847 }
5848
5849 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5850 u8 tunnel_type)
5851 {
5852 struct hwrm_tunnel_dst_port_alloc_output *resp;
5853 struct hwrm_tunnel_dst_port_alloc_input *req;
5854 int rc;
5855
5856 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5857 if (rc)
5858 return rc;
5859
5860 req->tunnel_type = tunnel_type;
5861 req->tunnel_dst_port_val = port;
5862
5863 resp = hwrm_req_hold(bp, req);
5864 rc = hwrm_req_send(bp, req);
5865 if (rc) {
5866 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5867 rc);
5868 goto err_out;
5869 }
5870
5871 switch (tunnel_type) {
5872 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5873 bp->vxlan_port = port;
5874 bp->vxlan_fw_dst_port_id =
5875 le16_to_cpu(resp->tunnel_dst_port_id);
5876 break;
5877 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5878 bp->nge_port = port;
5879 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5880 break;
5881 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5882 bp->vxlan_gpe_port = port;
5883 bp->vxlan_gpe_fw_dst_port_id =
5884 le16_to_cpu(resp->tunnel_dst_port_id);
5885 break;
5886 default:
5887 break;
5888 }
5889 if (bp->flags & BNXT_FLAG_TPA)
5890 bnxt_set_tpa(bp, true);
5891
5892 err_out:
5893 hwrm_req_drop(bp, req);
5894 return rc;
5895 }
5896
5897 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5898 {
5899 struct hwrm_cfa_l2_set_rx_mask_input *req;
5900 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5901 int rc;
5902
5903 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5904 if (rc)
5905 return rc;
5906
5907 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5908 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5909 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5910 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5911 }
5912 req->mask = cpu_to_le32(vnic->rx_mask);
5913 return hwrm_req_send_silent(bp, req);
5914 }
5915
5916 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5917 {
5918 if (!atomic_dec_and_test(&fltr->refcnt))
5919 return;
5920 spin_lock_bh(&bp->ntp_fltr_lock);
5921 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5922 spin_unlock_bh(&bp->ntp_fltr_lock);
5923 return;
5924 }
5925 hlist_del_rcu(&fltr->base.hash);
5926 bnxt_del_one_usr_fltr(bp, &fltr->base);
5927 if (fltr->base.flags) {
5928 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5929 bp->ntp_fltr_count--;
5930 }
5931 spin_unlock_bh(&bp->ntp_fltr_lock);
5932 kfree_rcu(fltr, base.rcu);
5933 }
5934
5935 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5936 struct bnxt_l2_key *key,
5937 u32 idx)
5938 {
5939 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5940 struct bnxt_l2_filter *fltr;
5941
5942 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5943 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5944
5945 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5946 l2_key->vlan == key->vlan)
5947 return fltr;
5948 }
5949 return NULL;
5950 }
5951
5952 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5953 struct bnxt_l2_key *key,
5954 u32 idx)
5955 {
5956 struct bnxt_l2_filter *fltr = NULL;
5957
5958 rcu_read_lock();
5959 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5960 if (fltr)
5961 atomic_inc(&fltr->refcnt);
5962 rcu_read_unlock();
5963 return fltr;
5964 }
5965
5966 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5967 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5968 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5969 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5970 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5971
5972 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5973 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5974 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5975 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5976 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5977
5978 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5979 {
5980 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5981 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5982 return sizeof(fkeys->addrs.v4addrs) +
5983 sizeof(fkeys->ports);
5984
5985 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5986 return sizeof(fkeys->addrs.v4addrs);
5987 }
5988
5989 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5990 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5991 return sizeof(fkeys->addrs.v6addrs) +
5992 sizeof(fkeys->ports);
5993
5994 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5995 return sizeof(fkeys->addrs.v6addrs);
5996 }
5997
5998 return 0;
5999 }
6000
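/* Software Toeplitz hash over the flow's address/port tuple, presumably so
 * the driver can compute the same RSS hash value the hardware would produce
 * for a flow (e.g. for ntuple/aRFS lookups).  The 64-bit "prefix" window
 * starts with the first 8 bytes of the RSS key (bp->toeplitz_prefix), shifts
 * left one bit per input bit, and pulls in later key bytes as it advances
 * (j starts at 8 below).  Only the upper 32 bits of the result are valid.
 */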
6001 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
6002 const unsigned char *key)
6003 {
6004 u64 prefix = bp->toeplitz_prefix, hash = 0;
6005 struct bnxt_ipv4_tuple tuple4;
6006 struct bnxt_ipv6_tuple tuple6;
6007 int i, j, len = 0;
6008 u8 *four_tuple;
6009
6010 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6011 if (!len)
6012 return 0;
6013
6014 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6015 tuple4.v4addrs = fkeys->addrs.v4addrs;
6016 tuple4.ports = fkeys->ports;
6017 four_tuple = (unsigned char *)&tuple4;
6018 } else {
6019 tuple6.v6addrs = fkeys->addrs.v6addrs;
6020 tuple6.ports = fkeys->ports;
6021 four_tuple = (unsigned char *)&tuple6;
6022 }
6023
6024 for (i = 0, j = 8; i < len; i++, j++) {
6025 u8 byte = four_tuple[i];
6026 int bit;
6027
6028 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6029 if (byte & 0x80)
6030 hash ^= prefix;
6031 }
6032 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6033 }
6034
6035 /* The valid part of the hash is in the upper 32 bits. */
6036 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6037 }
6038
6039 #ifdef CONFIG_RFS_ACCEL
6040 static struct bnxt_l2_filter *
6041 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6042 {
6043 struct bnxt_l2_filter *fltr;
6044 u32 idx;
6045
6046 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6047 BNXT_L2_FLTR_HASH_MASK;
6048 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6049 return fltr;
6050 }
6051 #endif
6052
6053 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6054 struct bnxt_l2_key *key, u32 idx)
6055 {
6056 struct hlist_head *head;
6057
6058 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6059 fltr->l2_key.vlan = key->vlan;
6060 fltr->base.type = BNXT_FLTR_TYPE_L2;
6061 if (fltr->base.flags) {
6062 int bit_id;
6063
6064 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6065 bp->max_fltr, 0);
6066 if (bit_id < 0)
6067 return -ENOMEM;
6068 fltr->base.sw_id = (u16)bit_id;
6069 bp->ntp_fltr_count++;
6070 }
6071 head = &bp->l2_fltr_hash_tbl[idx];
6072 hlist_add_head_rcu(&fltr->base.hash, head);
6073 bnxt_insert_usr_fltr(bp, &fltr->base);
6074 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6075 atomic_set(&fltr->refcnt, 1);
6076 return 0;
6077 }
6078
6079 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6080 struct bnxt_l2_key *key,
6081 gfp_t gfp)
6082 {
6083 struct bnxt_l2_filter *fltr;
6084 u32 idx;
6085 int rc;
6086
6087 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6088 BNXT_L2_FLTR_HASH_MASK;
6089 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6090 if (fltr)
6091 return fltr;
6092
6093 fltr = kzalloc_obj(*fltr, gfp);
6094 if (!fltr)
6095 return ERR_PTR(-ENOMEM);
6096 spin_lock_bh(&bp->ntp_fltr_lock);
6097 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6098 spin_unlock_bh(&bp->ntp_fltr_lock);
6099 if (rc) {
6100 bnxt_del_l2_filter(bp, fltr);
6101 fltr = ERR_PTR(rc);
6102 }
6103 return fltr;
6104 }
6105
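/* Unlike bnxt_alloc_l2_filter(), which returns an existing matching filter,
 * this variant treats a duplicate as an error (-EEXIST) and performs the
 * lookup and insertion atomically under ntp_fltr_lock.
 */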
6106 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6107 struct bnxt_l2_key *key,
6108 u16 flags)
6109 {
6110 struct bnxt_l2_filter *fltr;
6111 u32 idx;
6112 int rc;
6113
6114 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6115 BNXT_L2_FLTR_HASH_MASK;
6116 spin_lock_bh(&bp->ntp_fltr_lock);
6117 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6118 if (fltr) {
6119 fltr = ERR_PTR(-EEXIST);
6120 goto l2_filter_exit;
6121 }
6122 fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
6123 if (!fltr) {
6124 fltr = ERR_PTR(-ENOMEM);
6125 goto l2_filter_exit;
6126 }
6127 fltr->base.flags = flags;
6128 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6129 if (rc) {
6130 spin_unlock_bh(&bp->ntp_fltr_lock);
6131 bnxt_del_l2_filter(bp, fltr);
6132 return ERR_PTR(rc);
6133 }
6134
6135 l2_filter_exit:
6136 spin_unlock_bh(&bp->ntp_fltr_lock);
6137 return fltr;
6138 }
6139
6140 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6141 {
6142 #ifdef CONFIG_BNXT_SRIOV
6143 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6144
6145 return vf->fw_fid;
6146 #else
6147 return INVALID_HW_RING_ID;
6148 #endif
6149 }
6150
6151 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6152 {
6153 struct hwrm_cfa_l2_filter_free_input *req;
6154 u16 target_id = 0xffff;
6155 int rc;
6156
6157 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6158 struct bnxt_pf_info *pf = &bp->pf;
6159
6160 if (fltr->base.vf_idx >= pf->active_vfs)
6161 return -EINVAL;
6162
6163 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6164 if (target_id == INVALID_HW_RING_ID)
6165 return -EINVAL;
6166 }
6167
6168 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6169 if (rc)
6170 return rc;
6171
6172 req->target_id = cpu_to_le16(target_id);
6173 req->l2_filter_id = fltr->base.filter_id;
6174 return hwrm_req_send(bp, req);
6175 }
6176
6177 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6178 {
6179 struct hwrm_cfa_l2_filter_alloc_output *resp;
6180 struct hwrm_cfa_l2_filter_alloc_input *req;
6181 u16 target_id = 0xffff;
6182 int rc;
6183
6184 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6185 struct bnxt_pf_info *pf = &bp->pf;
6186
6187 if (fltr->base.vf_idx >= pf->active_vfs)
6188 return -EINVAL;
6189
6190 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6191 }
6192 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6193 if (rc)
6194 return rc;
6195
6196 req->target_id = cpu_to_le16(target_id);
6197 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6198
6199 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6200 req->flags |=
6201 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6202 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6203 req->enables =
6204 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6205 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6206 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6207 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6208 eth_broadcast_addr(req->l2_addr_mask);
6209
6210 if (fltr->l2_key.vlan) {
6211 req->enables |=
6212 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6213 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6214 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6215 req->num_vlans = 1;
6216 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6217 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6218 }
6219
6220 resp = hwrm_req_hold(bp, req);
6221 rc = hwrm_req_send(bp, req);
6222 if (!rc) {
6223 fltr->base.filter_id = resp->l2_filter_id;
6224 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6225 }
6226 hwrm_req_drop(bp, req);
6227 return rc;
6228 }
6229
6230 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6231 struct bnxt_ntuple_filter *fltr)
6232 {
6233 struct hwrm_cfa_ntuple_filter_free_input *req;
6234 int rc;
6235
6236 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6237 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
6238 return 0;
6239
6240 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6241 if (rc)
6242 return rc;
6243
6244 req->ntuple_filter_id = fltr->base.filter_id;
6245 return hwrm_req_send(bp, req);
6246 }
6247
6248 #define BNXT_NTP_FLTR_FLAGS \
6249 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6250 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6251 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6252 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6253 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6254 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6255 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6256 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6257 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6258 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6259 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6260 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6261 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6262
6263 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6264 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6265
6266 void bnxt_fill_ipv6_mask(__be32 mask[4])
6267 {
6268 int i;
6269
6270 for (i = 0; i < 4; i++)
6271 mask[i] = cpu_to_be32(~0);
6272 }
6273
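/* Pick the destination for an RFS/ntuple filter.  A filter bound to a
 * custom RSS context steers to that context's VNIC; on chips with a
 * dedicated ntuple VNIC the RX queue is selected through the RFS ring
 * table index; otherwise the legacy mode points dst_id directly at the
 * RX ring.
 */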
6274 static void
6275 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6276 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6277 struct bnxt_ntuple_filter *fltr)
6278 {
6279 u16 rxq = fltr->base.rxq;
6280
6281 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6282 struct ethtool_rxfh_context *ctx;
6283 struct bnxt_rss_ctx *rss_ctx;
6284 struct bnxt_vnic_info *vnic;
6285
6286 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6287 fltr->base.fw_vnic_id);
6288 if (ctx) {
6289 rss_ctx = ethtool_rxfh_context_priv(ctx);
6290 vnic = &rss_ctx->vnic;
6291
6292 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6293 }
6294 return;
6295 }
6296 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6297 struct bnxt_vnic_info *vnic;
6298 u32 enables;
6299
6300 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6301 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6302 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6303 req->enables |= cpu_to_le32(enables);
6304 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6305 } else {
6306 u32 flags;
6307
6308 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6309 req->flags |= cpu_to_le32(flags);
6310 req->dst_id = cpu_to_le16(rxq);
6311 }
6312 }
6313
6314 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6315 struct bnxt_ntuple_filter *fltr)
6316 {
6317 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6318 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6319 struct bnxt_flow_masks *masks = &fltr->fmasks;
6320 struct flow_keys *keys = &fltr->fkeys;
6321 struct bnxt_l2_filter *l2_fltr;
6322 struct bnxt_vnic_info *vnic;
6323 int rc;
6324
6325 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6326 if (rc)
6327 return rc;
6328
6329 l2_fltr = fltr->l2_fltr;
6330 req->l2_filter_id = l2_fltr->base.filter_id;
6331
6332 if (fltr->base.flags & BNXT_ACT_DROP) {
6333 req->flags =
6334 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6335 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6336 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6337 } else {
6338 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6339 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6340 }
6341 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6342
6343 req->ethertype = htons(ETH_P_IP);
6344 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6345 req->ip_protocol = keys->basic.ip_proto;
6346
6347 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6348 req->ethertype = htons(ETH_P_IPV6);
6349 req->ip_addr_type =
6350 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6351 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6352 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6353 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6354 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6355 } else {
6356 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6357 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6358 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6359 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6360 }
6361 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6362 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6363 req->tunnel_type =
6364 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6365 }
6366
6367 req->src_port = keys->ports.src;
6368 req->src_port_mask = masks->ports.src;
6369 req->dst_port = keys->ports.dst;
6370 req->dst_port_mask = masks->ports.dst;
6371
6372 resp = hwrm_req_hold(bp, req);
6373 rc = hwrm_req_send(bp, req);
6374 if (!rc)
6375 fltr->base.filter_id = resp->ntuple_filter_id;
6376 hwrm_req_drop(bp, req);
6377 return rc;
6378 }
6379
6380 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6381 const u8 *mac_addr)
6382 {
6383 struct bnxt_l2_filter *fltr;
6384 struct bnxt_l2_key key;
6385 int rc;
6386
6387 ether_addr_copy(key.dst_mac_addr, mac_addr);
6388 key.vlan = 0;
6389 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6390 if (IS_ERR(fltr))
6391 return PTR_ERR(fltr);
6392
6393 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6394 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6395 if (rc)
6396 bnxt_del_l2_filter(bp, fltr);
6397 else
6398 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6399 return rc;
6400 }
6401
6402 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6403 {
6404 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6405
6406 /* Any associated ntuple filters will also be cleared by firmware. */
6407 for (i = 0; i < num_of_vnics; i++) {
6408 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6409
6410 for (j = 0; j < vnic->uc_filter_count; j++) {
6411 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6412
6413 bnxt_hwrm_l2_filter_free(bp, fltr);
6414 bnxt_del_l2_filter(bp, fltr);
6415 }
6416 vnic->uc_filter_count = 0;
6417 }
6418 }
6419
6420 #define BNXT_DFLT_TUNL_TPA_BMAP \
6421 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6422 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6423 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6424
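/* When the firmware supports per-tunnel TPA, GRE and plain IPv4/IPv6
 * aggregation are enabled by default; the VXLAN, VXLAN-GPE and Geneve bits
 * are added only while the corresponding UDP tunnel port is configured.
 */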
6425 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6426 struct hwrm_vnic_tpa_cfg_input *req)
6427 {
6428 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6429
6430 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6431 return;
6432
6433 if (bp->vxlan_port)
6434 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6435 if (bp->vxlan_gpe_port)
6436 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6437 if (bp->nge_port)
6438 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6439
6440 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6441 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6442 }
6443
6444 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6445 u32 tpa_flags)
6446 {
6447 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6448 struct hwrm_vnic_tpa_cfg_input *req;
6449 int rc;
6450
6451 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6452 return 0;
6453
6454 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6455 if (rc)
6456 return rc;
6457
6458 if (tpa_flags) {
6459 u16 mss = bp->dev->mtu - 40;
6460 u32 nsegs, n, segs = 0, flags;
6461
6462 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6463 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6464 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6465 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6466 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6467 if (tpa_flags & BNXT_FLAG_GRO)
6468 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6469
6470 req->flags = cpu_to_le32(flags);
6471
6472 req->enables =
6473 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6474 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6475 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6476
6477 /* The number of segs is in log2 units, and the first packet is
6478 * not counted as part of these units.
6479 */
6480 if (mss <= BNXT_RX_PAGE_SIZE) {
6481 n = BNXT_RX_PAGE_SIZE / mss;
6482 nsegs = (MAX_SKB_FRAGS - 1) * n;
6483 } else {
6484 n = mss / BNXT_RX_PAGE_SIZE;
6485 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6486 n++;
6487 nsegs = (MAX_SKB_FRAGS - n) / n;
6488 }
6489
6490 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6491 segs = MAX_TPA_SEGS_P5;
6492 max_aggs = bp->max_tpa;
6493 } else {
6494 segs = ilog2(nsegs);
6495 }
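/* Worked example for the pre-P5 path, assuming a 1500-byte MTU
 * (mss = 1460), a 4K BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS == 17:
 * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, so
 * segs = ilog2(32) = 5.
 */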
6496 req->max_agg_segs = cpu_to_le16(segs);
6497 req->max_aggs = cpu_to_le16(max_aggs);
6498
6499 req->min_agg_len = cpu_to_le32(512);
6500 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6501 }
6502 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6503
6504 return hwrm_req_send(bp, req);
6505 }
6506
6507 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6508 {
6509 struct bnxt_ring_grp_info *grp_info;
6510
6511 grp_info = &bp->grp_info[ring->grp_idx];
6512 return grp_info->cp_fw_ring_id;
6513 }
6514
6515 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6516 {
6517 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6518 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6519 else
6520 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6521 }
6522
6523 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6524 {
6525 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6526 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6527 else
6528 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6529 }
6530
6531 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6532 {
6533 int entries;
6534
6535 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6536 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6537 else
6538 entries = HW_HASH_INDEX_SIZE;
6539
6540 bp->rss_indir_tbl_entries = entries;
6541 bp->rss_indir_tbl =
6542 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6543 if (!bp->rss_indir_tbl)
6544 return -ENOMEM;
6545
6546 return 0;
6547 }
6548
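/* Program the default RSS indirection table: entries are spread round-robin
 * across the RX rings via ethtool_rxfh_indir_default(), and any unused tail
 * entries are zeroed.  On Nitro A0 one RX ring is excluded from the spread
 * (max_rings = rx_nr_rings - 1).
 */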
6549 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6550 struct ethtool_rxfh_context *rss_ctx)
6551 {
6552 u16 max_rings, max_entries, pad, i;
6553 u32 *rss_indir_tbl;
6554
6555 if (!bp->rx_nr_rings)
6556 return;
6557
6558 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6559 max_rings = bp->rx_nr_rings - 1;
6560 else
6561 max_rings = bp->rx_nr_rings;
6562
6563 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6564 if (rss_ctx)
6565 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6566 else
6567 rss_indir_tbl = &bp->rss_indir_tbl[0];
6568
6569 for (i = 0; i < max_entries; i++)
6570 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6571
6572 pad = bp->rss_indir_tbl_entries - max_entries;
6573 if (pad)
6574 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6575 }
6576
6577 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6578 {
6579 u32 i, tbl_size, max_ring = 0;
6580
6581 if (!bp->rss_indir_tbl)
6582 return 0;
6583
6584 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6585 for (i = 0; i < tbl_size; i++)
6586 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6587 return max_ring;
6588 }
6589
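/* Number of RSS contexts needed to cover the RX rings.  On P5+ each context
 * holds BNXT_RSS_TABLE_ENTRIES_P5 ring-table entries, so the count scales
 * with the ring count unless the large-context capability is present.
 * Older chips need one context (two on Nitro A0).
 */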
6590 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6591 {
6592 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6593 if (!rx_rings)
6594 return 0;
6595 if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6596 return BNXT_RSS_TABLE_MAX_TBL_P5;
6597
6598 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6599 BNXT_RSS_TABLE_ENTRIES_P5);
6600 }
6601 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6602 return 2;
6603 return 1;
6604 }
6605
6606 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6607 {
6608 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6609 u16 i, j;
6610
6611 /* Fill the RSS indirection table with ring group ids */
6612 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6613 if (!no_rss)
6614 j = bp->rss_indir_tbl[i];
6615 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6616 }
6617 }
6618
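/* On P5+ chips each RSS indirection entry is a pair: the RX ring's FW ring
 * ID followed by its completion ring ID, which is why ring_tbl below
 * advances twice per table index.
 */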
6619 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6620 struct bnxt_vnic_info *vnic)
6621 {
6622 __le16 *ring_tbl = vnic->rss_table;
6623 struct bnxt_rx_ring_info *rxr;
6624 u16 tbl_size, i;
6625
6626 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6627
6628 for (i = 0; i < tbl_size; i++) {
6629 u16 ring_id, j;
6630
6631 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6632 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6633 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6634 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6635 else
6636 j = bp->rss_indir_tbl[i];
6637 rxr = &bp->rx_ring[j];
6638
6639 ring_id = rxr->rx_ring_struct.fw_ring_id;
6640 *ring_tbl++ = cpu_to_le16(ring_id);
6641 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6642 *ring_tbl++ = cpu_to_le16(ring_id);
6643 }
6644 }
6645
6646 static void
6647 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6648 struct bnxt_vnic_info *vnic)
6649 {
6650 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6651 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6652 if (bp->flags & BNXT_FLAG_CHIP_P7)
6653 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6654 } else {
6655 bnxt_fill_hw_rss_tbl(bp, vnic);
6656 }
6657
6658 if (bp->rss_hash_delta) {
6659 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6660 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6661 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6662 else
6663 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6664 } else {
6665 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6666 }
6667 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6668 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6669 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6670 }
6671
6672 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6673 bool set_rss)
6674 {
6675 struct hwrm_vnic_rss_cfg_input *req;
6676 int rc;
6677
6678 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6679 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6680 return 0;
6681
6682 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6683 if (rc)
6684 return rc;
6685
6686 if (set_rss)
6687 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6688 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6689 return hwrm_req_send(bp, req);
6690 }
6691
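/* P5+ variant: the ring table is programmed in BNXT_RSS_TABLE_SIZE_P5-byte
 * chunks, one HWRM request per ring-table pair index / RSS context.
 */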
6692 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6693 struct bnxt_vnic_info *vnic, bool set_rss)
6694 {
6695 struct hwrm_vnic_rss_cfg_input *req;
6696 dma_addr_t ring_tbl_map;
6697 u32 i, nr_ctxs;
6698 int rc;
6699
6700 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6701 if (rc)
6702 return rc;
6703
6704 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6705 if (!set_rss)
6706 return hwrm_req_send(bp, req);
6707
6708 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6709 ring_tbl_map = vnic->rss_table_dma_addr;
6710 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6711
6712 hwrm_req_hold(bp, req);
6713 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6714 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6715 req->ring_table_pair_index = i;
6716 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6717 rc = hwrm_req_send(bp, req);
6718 if (rc)
6719 goto exit;
6720 }
6721
6722 exit:
6723 hwrm_req_drop(bp, req);
6724 return rc;
6725 }
6726
6727 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6728 {
6729 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6730 struct hwrm_vnic_rss_qcfg_output *resp;
6731 struct hwrm_vnic_rss_qcfg_input *req;
6732
6733 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6734 return;
6735
6736 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6737 /* All contexts are configured to the same hash_type; context zero always exists */
6738 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6739 resp = hwrm_req_hold(bp, req);
6740 if (!hwrm_req_send(bp, req)) {
6741 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6742 bp->rss_hash_delta = 0;
6743 }
6744 hwrm_req_drop(bp, req);
6745 }
6746
6747 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6748 {
6749 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6750 struct hwrm_vnic_plcmodes_cfg_input *req;
6751 int rc;
6752
6753 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6754 if (rc)
6755 return rc;
6756
6757 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6758 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6759 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6760
6761 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6762 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6763 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6764 req->enables |=
6765 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6766 req->hds_threshold = cpu_to_le16(hds_thresh);
6767 }
6768 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6769 return hwrm_req_send(bp, req);
6770 }
6771
6772 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6773 struct bnxt_vnic_info *vnic,
6774 u16 ctx_idx)
6775 {
6776 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6777
6778 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6779 return;
6780
6781 req->rss_cos_lb_ctx_id =
6782 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6783
6784 hwrm_req_send(bp, req);
6785 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6786 }
6787
6788 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6789 {
6790 int i, j;
6791
6792 for (i = 0; i < bp->nr_vnics; i++) {
6793 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6794
6795 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6796 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6797 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6798 }
6799 }
6800 bp->rsscos_nr_ctxs = 0;
6801 }
6802
6803 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6804 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6805 {
6806 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6807 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6808 int rc;
6809
6810 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6811 if (rc)
6812 return rc;
6813
6814 resp = hwrm_req_hold(bp, req);
6815 rc = hwrm_req_send(bp, req);
6816 if (!rc)
6817 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6818 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6819 hwrm_req_drop(bp, req);
6820
6821 return rc;
6822 }
6823
6824 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6825 {
6826 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6827 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6828 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6829 }
6830
6831 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6832 {
6833 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6834 struct hwrm_vnic_cfg_input *req;
6835 unsigned int ring = 0, grp_idx;
6836 u16 def_vlan = 0;
6837 int rc;
6838
6839 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6840 if (rc)
6841 return rc;
6842
6843 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6844 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6845
6846 req->default_rx_ring_id =
6847 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6848 req->default_cmpl_ring_id =
6849 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6850 req->enables =
6851 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6852 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6853 goto vnic_mru;
6854 }
6855 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6856 /* Only RSS is supported for now.  TBD: COS & LB */
6857 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6858 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6859 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6860 VNIC_CFG_REQ_ENABLES_MRU);
6861 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6862 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6863 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6864 VNIC_CFG_REQ_ENABLES_MRU);
6865 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6866 } else {
6867 req->rss_rule = cpu_to_le16(0xffff);
6868 }
6869
6870 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6871 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6872 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6873 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6874 } else {
6875 req->cos_rule = cpu_to_le16(0xffff);
6876 }
6877
6878 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6879 ring = 0;
6880 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6881 ring = vnic->vnic_id - 1;
6882 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6883 ring = bp->rx_nr_rings - 1;
6884
6885 grp_idx = bp->rx_ring[ring].bnapi->index;
6886 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6887 req->lb_rule = cpu_to_le16(0xffff);
6888 vnic_mru:
6889 vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6890 req->mru = cpu_to_le16(vnic->mru);
6891
6892 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6893 #ifdef CONFIG_BNXT_SRIOV
6894 if (BNXT_VF(bp))
6895 def_vlan = bp->vf.vlan;
6896 #endif
6897 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6898 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6899 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6900 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6901
6902 return hwrm_req_send(bp, req);
6903 }
6904
6905 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6906 struct bnxt_vnic_info *vnic)
6907 {
6908 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6909 struct hwrm_vnic_free_input *req;
6910
6911 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6912 return;
6913
6914 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6915
6916 hwrm_req_send(bp, req);
6917 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6918 }
6919 }
6920
6921 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6922 {
6923 u16 i;
6924
6925 for (i = 0; i < bp->nr_vnics; i++)
6926 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6927 }
6928
6929 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6930 unsigned int start_rx_ring_idx,
6931 unsigned int nr_rings)
6932 {
6933 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6934 struct hwrm_vnic_alloc_output *resp;
6935 struct hwrm_vnic_alloc_input *req;
6936 int rc;
6937
6938 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6939 if (rc)
6940 return rc;
6941
6942 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6943 goto vnic_no_ring_grps;
6944
6945 /* map ring groups to this vnic */
6946 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6947 grp_idx = bp->rx_ring[i].bnapi->index;
6948 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6949 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6950 j, nr_rings);
6951 break;
6952 }
6953 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6954 }
6955
6956 vnic_no_ring_grps:
6957 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6958 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6959 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6960 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6961
6962 resp = hwrm_req_hold(bp, req);
6963 rc = hwrm_req_send(bp, req);
6964 if (!rc)
6965 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6966 hwrm_req_drop(bp, req);
6967 return rc;
6968 }
6969
6970 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6971 {
6972 struct hwrm_vnic_qcaps_output *resp;
6973 struct hwrm_vnic_qcaps_input *req;
6974 int rc;
6975
6976 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6977 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6978 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6979 if (bp->hwrm_spec_code < 0x10600)
6980 return 0;
6981
6982 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6983 if (rc)
6984 return rc;
6985
6986 resp = hwrm_req_hold(bp, req);
6987 rc = hwrm_req_send(bp, req);
6988 if (!rc) {
6989 u32 flags = le32_to_cpu(resp->flags);
6990
6991 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6992 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6993 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6994 if (flags &
6995 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6996 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6997
6998 /* Older P5 fw before EXT_HW_STATS support did not set
6999 * VLAN_STRIP_CAP properly.
7000 */
7001 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7002 (BNXT_CHIP_P5(bp) &&
7003 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7004 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7005 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7006 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7007 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7008 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7009 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7010 if (bp->max_tpa_v2) {
7011 if (BNXT_CHIP_P5(bp))
7012 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7013 else
7014 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7015 }
7016 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7017 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7018 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7019 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7020 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7021 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7022 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7023 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7024 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7025 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7026 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7027 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7028 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7029 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7030 }
7031 hwrm_req_drop(bp, req);
7032 return rc;
7033 }
7034
7035 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7036 {
7037 struct hwrm_ring_grp_alloc_output *resp;
7038 struct hwrm_ring_grp_alloc_input *req;
7039 int rc;
7040 u16 i;
7041
7042 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7043 return 0;
7044
7045 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7046 if (rc)
7047 return rc;
7048
7049 resp = hwrm_req_hold(bp, req);
7050 for (i = 0; i < bp->rx_nr_rings; i++) {
7051 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7052
7053 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7054 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7055 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7056 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7057
7058 rc = hwrm_req_send(bp, req);
7059
7060 if (rc)
7061 break;
7062
7063 bp->grp_info[grp_idx].fw_grp_id =
7064 le32_to_cpu(resp->ring_group_id);
7065 }
7066 hwrm_req_drop(bp, req);
7067 return rc;
7068 }
7069
7070 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7071 {
7072 struct hwrm_ring_grp_free_input *req;
7073 u16 i;
7074
7075 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7076 return;
7077
7078 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7079 return;
7080
7081 hwrm_req_hold(bp, req);
7082 for (i = 0; i < bp->cp_nr_rings; i++) {
7083 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7084 continue;
7085 req->ring_group_id =
7086 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7087
7088 hwrm_req_send(bp, req);
7089 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7090 }
7091 hwrm_req_drop(bp, req);
7092 }
7093
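/* Fill the P5+ specific fields of a ring allocation request: RX/AGG
 * buffer sizes, the stats context, the NQ binding and, for AGG rings,
 * the parent RX ring ID.
 */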
7094 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7095 struct hwrm_ring_alloc_input *req,
7096 struct bnxt_rx_ring_info *rxr,
7097 struct bnxt_ring_struct *ring)
7098 {
7099 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7100 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7101 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7102
7103 if (ring_type == HWRM_RING_ALLOC_AGG) {
7104 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7105 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7106 req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7107 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7108 } else {
7109 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7110 if (NET_IP_ALIGN == 2)
7111 req->flags =
7112 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7113 }
7114 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7115 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7116 req->enables |= cpu_to_le32(enables);
7117 }
7118
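/* Build and send one HWRM_RING_ALLOC request for a TX, RX, AGG, CMPL or
 * NQ ring. On success the firmware-assigned ID is stored in
 * ring->fw_ring_id; any transport or firmware error is returned as -EIO.
 */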
7119 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7120 struct bnxt_rx_ring_info *rxr,
7121 struct bnxt_ring_struct *ring,
7122 u32 ring_type, u32 map_index)
7123 {
7124 struct hwrm_ring_alloc_output *resp;
7125 struct hwrm_ring_alloc_input *req;
7126 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7127 struct bnxt_ring_grp_info *grp_info;
7128 int rc, err = 0;
7129 u16 ring_id;
7130
7131 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7132 if (rc)
7133 goto exit;
7134
7135 req->enables = 0;
7136 if (rmem->nr_pages > 1) {
7137 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7138 /* Page size is in log2 units */
7139 req->page_size = BNXT_PAGE_SHIFT;
7140 req->page_tbl_depth = 1;
7141 } else {
7142 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7143 }
7144 req->fbo = 0;
7145 /* Association of ring index with doorbell index and MSIX number */
7146 req->logical_id = cpu_to_le16(map_index);
7147
7148 switch (ring_type) {
7149 case HWRM_RING_ALLOC_TX: {
7150 struct bnxt_tx_ring_info *txr;
7151 u16 flags = 0;
7152
7153 txr = container_of(ring, struct bnxt_tx_ring_info,
7154 tx_ring_struct);
7155 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7156 /* Association of transmit ring with completion ring */
7157 grp_info = &bp->grp_info[ring->grp_idx];
7158 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7159 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7160 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7161 req->queue_id = cpu_to_le16(ring->queue_id);
7162 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7163 req->cmpl_coal_cnt =
7164 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7165 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7166 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7167 req->flags = cpu_to_le16(flags);
7168 break;
7169 }
7170 case HWRM_RING_ALLOC_RX:
7171 case HWRM_RING_ALLOC_AGG:
7172 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7173 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7174 cpu_to_le32(bp->rx_ring_mask + 1) :
7175 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7176 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7177 bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7178 rxr, ring);
7179 break;
7180 case HWRM_RING_ALLOC_CMPL:
7181 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7182 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7183 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7184 /* Association of cp ring with nq */
7185 grp_info = &bp->grp_info[map_index];
7186 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7187 req->cq_handle = cpu_to_le64(ring->handle);
7188 req->enables |= cpu_to_le32(
7189 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7190 } else {
7191 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7192 }
7193 break;
7194 case HWRM_RING_ALLOC_NQ:
7195 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7196 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7197 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7198 break;
7199 default:
7200 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7201 ring_type);
7202 return -EINVAL;
7203 }
7204
7205 resp = hwrm_req_hold(bp, req);
7206 rc = hwrm_req_send(bp, req);
7207 err = le16_to_cpu(resp->error_code);
7208 ring_id = le16_to_cpu(resp->ring_id);
7209 hwrm_req_drop(bp, req);
7210
7211 exit:
7212 if (rc || err) {
7213 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7214 ring_type, rc, err);
7215 return -EIO;
7216 }
7217 ring->fw_ring_id = ring_id;
7218 return rc;
7219 }
7220
7221 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7222 {
7223 int rc;
7224
7225 if (BNXT_PF(bp)) {
7226 struct hwrm_func_cfg_input *req;
7227
7228 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7229 if (rc)
7230 return rc;
7231
7232 req->fid = cpu_to_le16(0xffff);
7233 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7234 req->async_event_cr = cpu_to_le16(idx);
7235 return hwrm_req_send(bp, req);
7236 } else {
7237 struct hwrm_func_vf_cfg_input *req;
7238
7239 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7240 if (rc)
7241 return rc;
7242
7243 req->enables =
7244 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7245 req->async_event_cr = cpu_to_le16(idx);
7246 return hwrm_req_send(bp, req);
7247 }
7248 }
7249
7250 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7251 u32 ring_type)
7252 {
7253 switch (ring_type) {
7254 case HWRM_RING_ALLOC_TX:
7255 db->db_ring_mask = bp->tx_ring_mask;
7256 break;
7257 case HWRM_RING_ALLOC_RX:
7258 db->db_ring_mask = bp->rx_ring_mask;
7259 break;
7260 case HWRM_RING_ALLOC_AGG:
7261 db->db_ring_mask = bp->rx_agg_ring_mask;
7262 break;
7263 case HWRM_RING_ALLOC_CMPL:
7264 case HWRM_RING_ALLOC_NQ:
7265 db->db_ring_mask = bp->cp_ring_mask;
7266 break;
7267 }
7268 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7269 db->db_epoch_mask = db->db_ring_mask + 1;
7270 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7271 }
7272 }
7273
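/* Initialize the doorbell record for a newly allocated ring. P5+ chips
 * use a 64-bit doorbell key at a fixed BAR offset; older chips use a
 * 32-bit key at map_idx * 0x80.
 */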
7274 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7275 u32 map_idx, u32 xid)
7276 {
7277 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7278 switch (ring_type) {
7279 case HWRM_RING_ALLOC_TX:
7280 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7281 break;
7282 case HWRM_RING_ALLOC_RX:
7283 case HWRM_RING_ALLOC_AGG:
7284 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7285 break;
7286 case HWRM_RING_ALLOC_CMPL:
7287 db->db_key64 = DBR_PATH_L2;
7288 break;
7289 case HWRM_RING_ALLOC_NQ:
7290 db->db_key64 = DBR_PATH_L2;
7291 break;
7292 }
7293 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7294
7295 if (bp->flags & BNXT_FLAG_CHIP_P7)
7296 db->db_key64 |= DBR_VALID;
7297
7298 db->doorbell = bp->bar1 + bp->db_offset;
7299 } else {
7300 db->doorbell = bp->bar1 + map_idx * 0x80;
7301 switch (ring_type) {
7302 case HWRM_RING_ALLOC_TX:
7303 db->db_key32 = DB_KEY_TX;
7304 break;
7305 case HWRM_RING_ALLOC_RX:
7306 case HWRM_RING_ALLOC_AGG:
7307 db->db_key32 = DB_KEY_RX;
7308 break;
7309 case HWRM_RING_ALLOC_CMPL:
7310 db->db_key32 = DB_KEY_CP;
7311 break;
7312 }
7313 }
7314 bnxt_set_db_mask(bp, db, ring_type);
7315 }
7316
7317 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7318 struct bnxt_rx_ring_info *rxr)
7319 {
7320 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7321 struct bnxt_napi *bnapi = rxr->bnapi;
7322 u32 type = HWRM_RING_ALLOC_RX;
7323 u32 map_idx = bnapi->index;
7324 int rc;
7325
7326 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7327 if (rc)
7328 return rc;
7329
7330 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7331 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7332
7333 return 0;
7334 }
7335
7336 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7337 struct bnxt_rx_ring_info *rxr)
7338 {
7339 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7340 u32 type = HWRM_RING_ALLOC_AGG;
7341 u32 grp_idx = ring->grp_idx;
7342 u32 map_idx;
7343 int rc;
7344
7345 map_idx = grp_idx + bp->rx_nr_rings;
7346 rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7347 if (rc)
7348 return rc;
7349
7350 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7351 ring->fw_ring_id);
7352 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7353 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7354 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7355
7356 return 0;
7357 }
7358
7359 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7360 struct bnxt_cp_ring_info *cpr)
7361 {
7362 const u32 type = HWRM_RING_ALLOC_CMPL;
7363 struct bnxt_napi *bnapi = cpr->bnapi;
7364 struct bnxt_ring_struct *ring;
7365 u32 map_idx = bnapi->index;
7366 int rc;
7367
7368 ring = &cpr->cp_ring_struct;
7369 ring->handle = BNXT_SET_NQ_HDL(cpr);
7370 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7371 if (rc)
7372 return rc;
7373 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7374 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7375 return 0;
7376 }
7377
7378 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7379 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7380 {
7381 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7382 const u32 type = HWRM_RING_ALLOC_TX;
7383 int rc;
7384
7385 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7386 if (rc)
7387 return rc;
7388 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7389 return 0;
7390 }
7391
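/* Allocate all firmware rings in dependency order: NQ (or legacy CMPL)
 * rings first, then TX, RX and finally the aggregation rings. The IRQ
 * stays disabled while each NQ is allocated and its doorbell set up.
 */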
7392 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7393 {
7394 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7395 int i, rc = 0;
7396 u32 type;
7397
7398 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7399 type = HWRM_RING_ALLOC_NQ;
7400 else
7401 type = HWRM_RING_ALLOC_CMPL;
7402 for (i = 0; i < bp->cp_nr_rings; i++) {
7403 struct bnxt_napi *bnapi = bp->bnapi[i];
7404 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7405 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7406 u32 map_idx = ring->map_idx;
7407 unsigned int vector;
7408
7409 vector = bp->irq_tbl[map_idx].vector;
7410 disable_irq_nosync(vector);
7411 rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7412 if (rc) {
7413 enable_irq(vector);
7414 goto err_out;
7415 }
7416 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7417 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7418 enable_irq(vector);
7419 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7420
7421 if (!i) {
7422 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7423 if (rc)
7424 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7425 }
7426 }
7427
7428 for (i = 0; i < bp->tx_nr_rings; i++) {
7429 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7430
7431 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7432 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7433 if (rc)
7434 goto err_out;
7435 }
7436 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7437 if (rc)
7438 goto err_out;
7439 }
7440
7441 for (i = 0; i < bp->rx_nr_rings; i++) {
7442 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7443
7444 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7445 if (rc)
7446 goto err_out;
7447 /* Without agg rings, write the RX doorbell now. With agg rings, agg buffers are posted first and bnxt_hwrm_rx_agg_ring_alloc() writes both doorbells. */
7448 if (!agg_rings)
7449 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7450 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7451 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7452 if (rc)
7453 goto err_out;
7454 }
7455 }
7456
7457 if (agg_rings) {
7458 for (i = 0; i < bp->rx_nr_rings; i++) {
7459 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7460 if (rc)
7461 goto err_out;
7462 }
7463 }
7464 err_out:
7465 return rc;
7466 }
7467
7468 static void bnxt_cancel_dim(struct bnxt *bp)
7469 {
7470 int i;
7471
7472 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7473 * if NAPI is enabled.
7474 */
7475 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7476 return;
7477
7478 /* Make sure NAPI sees that the VNIC is disabled */
7479 synchronize_net();
7480 for (i = 0; i < bp->rx_nr_rings; i++) {
7481 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7482 struct bnxt_napi *bnapi = rxr->bnapi;
7483
7484 cancel_work_sync(&bnapi->cp_ring.dim.work);
7485 }
7486 }
7487
7488 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7489 struct bnxt_ring_struct *ring,
7490 u32 ring_type, int cmpl_ring_id)
7491 {
7492 struct hwrm_ring_free_output *resp;
7493 struct hwrm_ring_free_input *req;
7494 u16 error_code = 0;
7495 int rc;
7496
7497 if (BNXT_NO_FW_ACCESS(bp))
7498 return 0;
7499
7500 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7501 if (rc)
7502 goto exit;
7503
7504 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7505 req->ring_type = ring_type;
7506 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7507
7508 resp = hwrm_req_hold(bp, req);
7509 rc = hwrm_req_send(bp, req);
7510 error_code = le16_to_cpu(resp->error_code);
7511 hwrm_req_drop(bp, req);
7512 exit:
7513 if (rc || error_code) {
7514 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7515 ring_type, rc, error_code);
7516 return -EIO;
7517 }
7518 return 0;
7519 }
7520
7521 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7522 struct bnxt_tx_ring_info *txr,
7523 bool close_path)
7524 {
7525 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7526 u32 cmpl_ring_id;
7527
7528 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7529 return;
7530
7531 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7532 INVALID_HW_RING_ID;
7533 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7534 cmpl_ring_id);
7535 ring->fw_ring_id = INVALID_HW_RING_ID;
7536 }
7537
7538 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7539 struct bnxt_rx_ring_info *rxr,
7540 bool close_path)
7541 {
7542 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7543 u32 grp_idx = rxr->bnapi->index;
7544 u32 cmpl_ring_id;
7545
7546 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7547 return;
7548
7549 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7550 hwrm_ring_free_send_msg(bp, ring,
7551 RING_FREE_REQ_RING_TYPE_RX,
7552 close_path ? cmpl_ring_id :
7553 INVALID_HW_RING_ID);
7554 ring->fw_ring_id = INVALID_HW_RING_ID;
7555 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7556 }
7557
7558 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7559 struct bnxt_rx_ring_info *rxr,
7560 bool close_path)
7561 {
7562 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7563 u32 grp_idx = rxr->bnapi->index;
7564 u32 type, cmpl_ring_id;
7565
7566 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7567 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7568 else
7569 type = RING_FREE_REQ_RING_TYPE_RX;
7570
7571 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7572 return;
7573
7574 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7575 hwrm_ring_free_send_msg(bp, ring, type,
7576 close_path ? cmpl_ring_id :
7577 INVALID_HW_RING_ID);
7578 ring->fw_ring_id = INVALID_HW_RING_ID;
7579 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7580 }
7581
7582 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7583 struct bnxt_cp_ring_info *cpr)
7584 {
7585 struct bnxt_ring_struct *ring;
7586
7587 ring = &cpr->cp_ring_struct;
7588 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7589 return;
7590
7591 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7592 INVALID_HW_RING_ID);
7593 ring->fw_ring_id = INVALID_HW_RING_ID;
7594 }
7595
7596 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7597 {
7598 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7599 int i, size = ring->ring_mem.page_size;
7600
7601 cpr->cp_raw_cons = 0;
7602 cpr->toggle = 0;
7603
7604 for (i = 0; i < bp->cp_nr_pages; i++)
7605 if (cpr->cp_desc_ring[i])
7606 memset(cpr->cp_desc_ring[i], 0, size);
7607 }
7608
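/* Free all firmware rings in roughly the reverse of the allocation
 * order: TX first, then RX and aggregation rings, and the CMPL/NQ rings
 * last, after interrupts have been synchronously disabled.
 */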
7609 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7610 {
7611 u32 type;
7612 int i;
7613
7614 if (!bp->bnapi)
7615 return;
7616
7617 for (i = 0; i < bp->tx_nr_rings; i++)
7618 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7619
7620 bnxt_cancel_dim(bp);
7621 for (i = 0; i < bp->rx_nr_rings; i++) {
7622 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7623 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7624 }
7625
7626 /* The completion rings are about to be freed. After that the
7627 * IRQ doorbell will not work anymore. So we need to disable
7628 * IRQ here.
7629 */
7630 bnxt_disable_int_sync(bp);
7631
7632 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7633 type = RING_FREE_REQ_RING_TYPE_NQ;
7634 else
7635 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7636 for (i = 0; i < bp->cp_nr_rings; i++) {
7637 struct bnxt_napi *bnapi = bp->bnapi[i];
7638 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7639 struct bnxt_ring_struct *ring;
7640 int j;
7641
7642 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7643 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7644
7645 ring = &cpr->cp_ring_struct;
7646 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7647 hwrm_ring_free_send_msg(bp, ring, type,
7648 INVALID_HW_RING_ID);
7649 ring->fw_ring_id = INVALID_HW_RING_ID;
7650 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7651 }
7652 }
7653 }
7654
7655 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7656 bool shared);
7657 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7658 bool shared);
7659
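/* Refresh bp->hw_resc with the ring and context counts currently
 * reserved for this function (HWRM_FUNC_QCFG). On P5+ chips the
 * reserved RX/TX counts are trimmed to fit the reserved completion
 * rings.
 */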
7660 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7661 {
7662 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7663 struct hwrm_func_qcfg_output *resp;
7664 struct hwrm_func_qcfg_input *req;
7665 int rc;
7666
7667 if (bp->hwrm_spec_code < 0x10601)
7668 return 0;
7669
7670 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7671 if (rc)
7672 return rc;
7673
7674 req->fid = cpu_to_le16(0xffff);
7675 resp = hwrm_req_hold(bp, req);
7676 rc = hwrm_req_send(bp, req);
7677 if (rc) {
7678 hwrm_req_drop(bp, req);
7679 return rc;
7680 }
7681
7682 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7683 if (BNXT_NEW_RM(bp)) {
7684 u16 cp, stats;
7685
7686 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7687 hw_resc->resv_hw_ring_grps =
7688 le32_to_cpu(resp->alloc_hw_ring_grps);
7689 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7690 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7691 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7692 stats = le16_to_cpu(resp->alloc_stat_ctx);
7693 hw_resc->resv_irqs = cp;
7694 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7695 int rx = hw_resc->resv_rx_rings;
7696 int tx = hw_resc->resv_tx_rings;
7697
7698 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7699 rx >>= 1;
7700 if (cp < (rx + tx)) {
7701 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7702 if (rc)
7703 goto get_rings_exit;
7704 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7705 rx <<= 1;
7706 hw_resc->resv_rx_rings = rx;
7707 hw_resc->resv_tx_rings = tx;
7708 }
7709 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7710 hw_resc->resv_hw_ring_grps = rx;
7711 }
7712 hw_resc->resv_cp_rings = cp;
7713 hw_resc->resv_stat_ctxs = stats;
7714 }
7715 get_rings_exit:
7716 hwrm_req_drop(bp, req);
7717 return rc;
7718 }
7719
7720 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7721 {
7722 struct hwrm_func_qcfg_output *resp;
7723 struct hwrm_func_qcfg_input *req;
7724 int rc;
7725
7726 if (bp->hwrm_spec_code < 0x10601)
7727 return 0;
7728
7729 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7730 if (rc)
7731 return rc;
7732
7733 req->fid = cpu_to_le16(fid);
7734 resp = hwrm_req_hold(bp, req);
7735 rc = hwrm_req_send(bp, req);
7736 if (!rc)
7737 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7738
7739 hwrm_req_drop(bp, req);
7740 return rc;
7741 }
7742
7743 static bool bnxt_rfs_supported(struct bnxt *bp);
7744
7745 static struct hwrm_func_cfg_input *
7746 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7747 {
7748 struct hwrm_func_cfg_input *req;
7749 u32 enables = 0;
7750
7751 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7752 return NULL;
7753
7754 req->fid = cpu_to_le16(0xffff);
7755 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7756 req->num_tx_rings = cpu_to_le16(hwr->tx);
7757 if (BNXT_NEW_RM(bp)) {
7758 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7759 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7760 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7761 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7762 enables |= hwr->cp_p5 ?
7763 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7764 } else {
7765 enables |= hwr->cp ?
7766 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7767 enables |= hwr->grp ?
7768 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7769 }
7770 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7771 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7772 0;
7773 req->num_rx_rings = cpu_to_le16(hwr->rx);
7774 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7775 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7776 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7777 req->num_msix = cpu_to_le16(hwr->cp);
7778 } else {
7779 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7780 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7781 }
7782 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7783 req->num_vnics = cpu_to_le16(hwr->vnic);
7784 }
7785 req->enables = cpu_to_le32(enables);
7786 return req;
7787 }
7788
7789 static struct hwrm_func_vf_cfg_input *
7790 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7791 {
7792 struct hwrm_func_vf_cfg_input *req;
7793 u32 enables = 0;
7794
7795 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7796 return NULL;
7797
7798 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7799 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7800 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7801 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7802 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7803 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7804 enables |= hwr->cp_p5 ?
7805 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7806 } else {
7807 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7808 enables |= hwr->grp ?
7809 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7810 }
7811 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7812 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7813
7814 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7815 req->num_tx_rings = cpu_to_le16(hwr->tx);
7816 req->num_rx_rings = cpu_to_le16(hwr->rx);
7817 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7818 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7819 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7820 } else {
7821 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7822 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7823 }
7824 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7825 req->num_vnics = cpu_to_le16(hwr->vnic);
7826
7827 req->enables = cpu_to_le32(enables);
7828 return req;
7829 }
7830
7831 static int
7832 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7833 {
7834 struct hwrm_func_cfg_input *req;
7835 int rc;
7836
7837 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7838 if (!req)
7839 return -ENOMEM;
7840
7841 if (!req->enables) {
7842 hwrm_req_drop(bp, req);
7843 return 0;
7844 }
7845
7846 rc = hwrm_req_send(bp, req);
7847 if (rc)
7848 return rc;
7849
7850 if (bp->hwrm_spec_code < 0x10601)
7851 bp->hw_resc.resv_tx_rings = hwr->tx;
7852
7853 return bnxt_hwrm_get_rings(bp);
7854 }
7855
7856 static int
7857 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7858 {
7859 struct hwrm_func_vf_cfg_input *req;
7860 int rc;
7861
7862 if (!BNXT_NEW_RM(bp)) {
7863 bp->hw_resc.resv_tx_rings = hwr->tx;
7864 return 0;
7865 }
7866
7867 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7868 if (!req)
7869 return -ENOMEM;
7870
7871 rc = hwrm_req_send(bp, req);
7872 if (rc)
7873 return rc;
7874
7875 return bnxt_hwrm_get_rings(bp);
7876 }
7877
7878 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7879 {
7880 if (BNXT_PF(bp))
7881 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7882 else
7883 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7884 }
7885
7886 int bnxt_nq_rings_in_use(struct bnxt *bp)
7887 {
7888 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7889 }
7890
7891 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7892 {
7893 int cp;
7894
7895 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7896 return bnxt_nq_rings_in_use(bp);
7897
7898 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7899 return cp;
7900 }
7901
7902 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7903 {
7904 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7905 }
7906
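/* Number of RSS contexts needed for hwr->grp ring groups: per-VNIC
 * contexts on P5+ chips, a fixed maximum for VFs, or one extra context
 * when legacy RFS is supported.
 */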
7907 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7908 {
7909 if (!hwr->grp)
7910 return 0;
7911 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7912 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7913
7914 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7915 rss_ctx *= hwr->vnic;
7916 return rss_ctx;
7917 }
7918 if (BNXT_VF(bp))
7919 return BNXT_VF_MAX_RSS_CTX;
7920 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7921 return hwr->grp + 1;
7922 return 1;
7923 }
7924
7925 /* Check if a default RSS map needs to be set up. This function is only
7926 * used on older firmware that does not require reserving RX rings.
7927 */
7928 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7929 {
7930 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7931
7932 /* The RSS map is valid for RX rings set to resv_rx_rings */
7933 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7934 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7935 if (!netif_is_rxfh_configured(bp->dev))
7936 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7937 }
7938 }
7939
7940 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7941 {
7942 if (bp->flags & BNXT_FLAG_RFS) {
7943 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7944 return 2 + bp->num_rss_ctx;
7945 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7946 return rx_rings + 1;
7947 }
7948 return 1;
7949 }
7950
7951 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7952 {
7953 hwr->cp = bnxt_nq_rings_in_use(bp);
7954 hwr->cp_p5 = 0;
7955 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7956 hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
7957 hwr->tx = bp->tx_nr_rings;
7958 hwr->rx = bp->rx_nr_rings;
7959 hwr->grp = hwr->rx;
7960 hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
7961 hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
7962 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7963 hwr->rx <<= 1;
7964 hwr->stat = bnxt_get_func_stat_ctxs(bp);
7965 }
7966
7967 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7968 {
7969 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7970 struct bnxt_hw_rings hwr;
7971
7972 bnxt_get_total_resources(bp, &hwr);
7973
7974 /* Old firmware does not need RX ring reservations but we still
7975 * need to setup a default RSS map when needed. With new firmware
7976 * we go through RX ring reservations first and then set up the
7977 * RSS map for the successfully reserved RX rings when needed.
7978 */
7979 if (!BNXT_NEW_RM(bp))
7980 bnxt_check_rss_tbl_no_rmgr(bp);
7981
7982 if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
7983 return true;
7984
7985 if (!BNXT_NEW_RM(bp))
7986 return false;
7987
7988 if (hw_resc->resv_rx_rings != hwr.rx ||
7989 hw_resc->resv_vnics != hwr.vnic ||
7990 hw_resc->resv_stat_ctxs != hwr.stat ||
7991 hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
7992 (hw_resc->resv_hw_ring_grps != hwr.grp &&
7993 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7994 return true;
7995 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7996 if (hw_resc->resv_cp_rings != hwr.cp_p5)
7997 return true;
7998 } else if (hw_resc->resv_cp_rings != hwr.cp) {
7999 return true;
8000 }
8001 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
8002 hw_resc->resv_irqs != hwr.cp)
8003 return true;
8004 return false;
8005 }
8006
8007 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8008 {
8009 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8010
8011 hwr->tx = hw_resc->resv_tx_rings;
8012 if (BNXT_NEW_RM(bp)) {
8013 hwr->rx = hw_resc->resv_rx_rings;
8014 hwr->cp = hw_resc->resv_irqs;
8015 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8016 hwr->cp_p5 = hw_resc->resv_cp_rings;
8017 hwr->grp = hw_resc->resv_hw_ring_grps;
8018 hwr->vnic = hw_resc->resv_vnics;
8019 hwr->stat = hw_resc->resv_stat_ctxs;
8020 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8021 }
8022 }
8023
8024 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8025 {
8026 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8027 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8028 }
8029
8030 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8031
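/* Reserve rings and related contexts with firmware for the current
 * channel configuration, then adjust bp->*_nr_rings to what was
 * actually granted, dropping aggregation rings, the RSS map or ULP
 * MSI-X as needed when the reservation comes back short.
 */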
8032 static int __bnxt_reserve_rings(struct bnxt *bp)
8033 {
8034 struct bnxt_hw_rings hwr = {0};
8035 int rx_rings, old_rx_rings, rc;
8036 int cp = bp->cp_nr_rings;
8037 int ulp_msix = 0;
8038 bool sh = false;
8039 int tx_cp;
8040
8041 if (!bnxt_need_reserve_rings(bp))
8042 return 0;
8043
8044 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
8045 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8046 if (!ulp_msix)
8047 bnxt_set_ulp_stat_ctxs(bp, 0);
8048
8049 if (ulp_msix > bp->ulp_num_msix_want)
8050 ulp_msix = bp->ulp_num_msix_want;
8051 hwr.cp = cp + ulp_msix;
8052 } else {
8053 hwr.cp = bnxt_nq_rings_in_use(bp);
8054 }
8055
8056 hwr.tx = bp->tx_nr_rings;
8057 hwr.rx = bp->rx_nr_rings;
8058 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8059 sh = true;
8060 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8061 hwr.cp_p5 = hwr.rx + hwr.tx;
8062
8063 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8064
8065 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8066 hwr.rx <<= 1;
8067 hwr.grp = bp->rx_nr_rings;
8068 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8069 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8070 old_rx_rings = bp->hw_resc.resv_rx_rings;
8071
8072 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8073 if (rc)
8074 return rc;
8075
8076 bnxt_copy_reserved_rings(bp, &hwr);
8077
8078 rx_rings = hwr.rx;
8079 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8080 if (hwr.rx >= 2) {
8081 rx_rings = hwr.rx >> 1;
8082 } else {
8083 if (netif_running(bp->dev))
8084 return -ENOMEM;
8085
8086 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8087 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8088 bp->dev->hw_features &= ~NETIF_F_LRO;
8089 bp->dev->features &= ~NETIF_F_LRO;
8090 bnxt_set_ring_params(bp);
8091 }
8092 }
8093 rx_rings = min_t(int, rx_rings, hwr.grp);
8094 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8095 if (bnxt_ulp_registered(bp->edev) &&
8096 hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8097 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8098 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8099 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8100 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8101 hwr.rx = rx_rings << 1;
8102 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8103 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8104 if (hwr.tx != bp->tx_nr_rings) {
8105 netdev_warn(bp->dev,
8106 "Able to reserve only %d out of %d requested TX rings\n",
8107 hwr.tx, bp->tx_nr_rings);
8108 }
8109 bp->tx_nr_rings = hwr.tx;
8110
8111 /* If we cannot reserve all the RX rings, reset the RSS map only
8112 * if absolutely necessary
8113 */
8114 if (rx_rings != bp->rx_nr_rings) {
8115 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8116 rx_rings, bp->rx_nr_rings);
8117 if (netif_is_rxfh_configured(bp->dev) &&
8118 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8119 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8120 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8121 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8122 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8123 }
8124 }
8125 bp->rx_nr_rings = rx_rings;
8126 bp->cp_nr_rings = hwr.cp;
8127
8128 /* Fall back if we cannot reserve enough HW RSS contexts */
8129 if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8130 hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8131 bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8132
8133 if (!bnxt_rings_ok(bp, &hwr))
8134 return -ENOMEM;
8135
8136 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8137 !netif_is_rxfh_configured(bp->dev))
8138 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8139
8140 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8141 int resv_msix, resv_ctx, ulp_ctxs;
8142 struct bnxt_hw_resc *hw_resc;
8143
8144 hw_resc = &bp->hw_resc;
8145 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8146 ulp_msix = min_t(int, resv_msix, ulp_msix);
8147 bnxt_set_ulp_msix_num(bp, ulp_msix);
8148 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8149 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8150 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8151 }
8152
8153 return rc;
8154 }
8155
8156 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8157 {
8158 struct hwrm_func_vf_cfg_input *req;
8159 u32 flags;
8160
8161 if (!BNXT_NEW_RM(bp))
8162 return 0;
8163
8164 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8165 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8166 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8167 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8168 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8169 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8170 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8171 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8172 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8173
8174 req->flags = cpu_to_le32(flags);
8175 return hwrm_req_send_silent(bp, req);
8176 }
8177
8178 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8179 {
8180 struct hwrm_func_cfg_input *req;
8181 u32 flags;
8182
8183 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8184 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8185 if (BNXT_NEW_RM(bp)) {
8186 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8187 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8188 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8189 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8190 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8191 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8192 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8193 else
8194 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8195 }
8196
8197 req->flags = cpu_to_le32(flags);
8198 return hwrm_req_send_silent(bp, req);
8199 }
8200
8201 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8202 {
8203 if (bp->hwrm_spec_code < 0x10801)
8204 return 0;
8205
8206 if (BNXT_PF(bp))
8207 return bnxt_hwrm_check_pf_rings(bp, hwr);
8208
8209 return bnxt_hwrm_check_vf_rings(bp, hwr);
8210 }
8211
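/* Read interrupt coalescing capabilities (HWRM_RING_AGGINT_QCAPS),
 * keeping legacy defaults on firmware older than HWRM spec 1.9.2.
 */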
8212 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8213 {
8214 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8215 struct hwrm_ring_aggint_qcaps_output *resp;
8216 struct hwrm_ring_aggint_qcaps_input *req;
8217 int rc;
8218
8219 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8220 coal_cap->num_cmpl_dma_aggr_max = 63;
8221 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8222 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8223 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8224 coal_cap->int_lat_tmr_min_max = 65535;
8225 coal_cap->int_lat_tmr_max_max = 65535;
8226 coal_cap->num_cmpl_aggr_int_max = 65535;
8227 coal_cap->timer_units = 80;
8228
8229 if (bp->hwrm_spec_code < 0x10902)
8230 return;
8231
8232 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8233 return;
8234
8235 resp = hwrm_req_hold(bp, req);
8236 rc = hwrm_req_send_silent(bp, req);
8237 if (!rc) {
8238 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8239 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8240 coal_cap->num_cmpl_dma_aggr_max =
8241 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8242 coal_cap->num_cmpl_dma_aggr_during_int_max =
8243 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8244 coal_cap->cmpl_aggr_dma_tmr_max =
8245 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8246 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8247 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8248 coal_cap->int_lat_tmr_min_max =
8249 le16_to_cpu(resp->int_lat_tmr_min_max);
8250 coal_cap->int_lat_tmr_max_max =
8251 le16_to_cpu(resp->int_lat_tmr_max_max);
8252 coal_cap->num_cmpl_aggr_int_max =
8253 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8254 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8255 }
8256 hwrm_req_drop(bp, req);
8257 }
8258
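/* Convert microseconds to coalescing timer ticks. timer_units is the
 * tick period in nanoseconds: 80 by default, or as reported by
 * HWRM_RING_AGGINT_QCAPS.
 */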
8259 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8260 {
8261 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8262
8263 return usec * 1000 / coal_cap->timer_units;
8264 }
8265
8266 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8267 struct bnxt_coal *hw_coal,
8268 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8269 {
8270 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8271 u16 val, tmr, max, flags = hw_coal->flags;
8272 u32 cmpl_params = coal_cap->cmpl_params;
8273
8274 max = hw_coal->bufs_per_record * 128;
8275 if (hw_coal->budget)
8276 max = hw_coal->bufs_per_record * hw_coal->budget;
8277 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8278
8279 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8280 req->num_cmpl_aggr_int = cpu_to_le16(val);
8281
8282 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8283 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8284
8285 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8286 coal_cap->num_cmpl_dma_aggr_during_int_max);
8287 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8288
8289 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8290 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8291 req->int_lat_tmr_max = cpu_to_le16(tmr);
8292
8293 /* min timer set to 1/2 of interrupt timer */
8294 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8295 val = tmr / 2;
8296 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8297 req->int_lat_tmr_min = cpu_to_le16(val);
8298 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8299 }
8300
8301 /* buf timer set to 1/4 of interrupt timer */
8302 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8303 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8304
8305 if (cmpl_params &
8306 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8307 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8308 val = clamp_t(u16, tmr, 1,
8309 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8310 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8311 req->enables |=
8312 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8313 }
8314
8315 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8316 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8317 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8318 req->flags = cpu_to_le16(flags);
8319 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8320 }
8321
8322 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8323 struct bnxt_coal *hw_coal)
8324 {
8325 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8326 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8327 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8328 u32 nq_params = coal_cap->nq_params;
8329 u16 tmr;
8330 int rc;
8331
8332 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8333 return 0;
8334
8335 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8336 if (rc)
8337 return rc;
8338
8339 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8340 req->flags =
8341 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8342
8343 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8344 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8345 req->int_lat_tmr_min = cpu_to_le16(tmr);
8346 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8347 return hwrm_req_send(bp, req);
8348 }
8349
8350 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8351 {
8352 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8353 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8354 struct bnxt_coal coal;
8355 int rc;
8356
8357 /* Tick values in microseconds.
8358 * 1 coal_buf x bufs_per_record = 1 completion record.
8359 */
8360 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8361
8362 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8363 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8364
8365 if (!bnapi->rx_ring)
8366 return -ENODEV;
8367
8368 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8369 if (rc)
8370 return rc;
8371
8372 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8373
8374 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8375
8376 return hwrm_req_send(bp, req_rx);
8377 }
8378
8379 static int
8380 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8381 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8382 {
8383 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8384
8385 req->ring_id = cpu_to_le16(ring_id);
8386 return hwrm_req_send(bp, req);
8387 }
8388
8389 static int
8390 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8391 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8392 {
8393 struct bnxt_tx_ring_info *txr;
8394 int i, rc;
8395
8396 bnxt_for_each_napi_tx(i, bnapi, txr) {
8397 u16 ring_id;
8398
8399 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8400 req->ring_id = cpu_to_le16(ring_id);
8401 rc = hwrm_req_send(bp, req);
8402 if (rc)
8403 return rc;
8404 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8405 return 0;
8406 }
8407 return 0;
8408 }
8409
8410 int bnxt_hwrm_set_coal(struct bnxt *bp)
8411 {
8412 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8413 int i, rc;
8414
8415 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8416 if (rc)
8417 return rc;
8418
8419 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8420 if (rc) {
8421 hwrm_req_drop(bp, req_rx);
8422 return rc;
8423 }
8424
8425 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8426 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8427
8428 hwrm_req_hold(bp, req_rx);
8429 hwrm_req_hold(bp, req_tx);
8430 for (i = 0; i < bp->cp_nr_rings; i++) {
8431 struct bnxt_napi *bnapi = bp->bnapi[i];
8432 struct bnxt_coal *hw_coal;
8433
8434 if (!bnapi->rx_ring)
8435 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8436 else
8437 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8438 if (rc)
8439 break;
8440
8441 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8442 continue;
8443
8444 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8445 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8446 if (rc)
8447 break;
8448 }
8449 if (bnapi->rx_ring)
8450 hw_coal = &bp->rx_coal;
8451 else
8452 hw_coal = &bp->tx_coal;
8453 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8454 }
8455 hwrm_req_drop(bp, req_rx);
8456 hwrm_req_drop(bp, req_tx);
8457 return rc;
8458 }
8459
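/* Free every per-ring statistics context. On firmware with major
 * version <= 20, each context is explicitly cleared with
 * HWRM_STAT_CTX_CLR_STATS before it is freed.
 */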
8460 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8461 {
8462 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8463 struct hwrm_stat_ctx_free_input *req;
8464 int i;
8465
8466 if (!bp->bnapi)
8467 return;
8468
8469 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8470 return;
8471
8472 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8473 return;
8474 if (BNXT_FW_MAJ(bp) <= 20) {
8475 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8476 hwrm_req_drop(bp, req);
8477 return;
8478 }
8479 hwrm_req_hold(bp, req0);
8480 }
8481 hwrm_req_hold(bp, req);
8482 for (i = 0; i < bp->cp_nr_rings; i++) {
8483 struct bnxt_napi *bnapi = bp->bnapi[i];
8484 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8485
8486 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8487 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8488 if (req0) {
8489 req0->stat_ctx_id = req->stat_ctx_id;
8490 hwrm_req_send(bp, req0);
8491 }
8492 hwrm_req_send(bp, req);
8493
8494 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8495 }
8496 }
8497 hwrm_req_drop(bp, req);
8498 if (req0)
8499 hwrm_req_drop(bp, req0);
8500 }
8501
8502 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8503 {
8504 struct hwrm_stat_ctx_alloc_output *resp;
8505 struct hwrm_stat_ctx_alloc_input *req;
8506 int rc, i;
8507
8508 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8509 return 0;
8510
8511 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8512 if (rc)
8513 return rc;
8514
8515 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8516 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8517
8518 resp = hwrm_req_hold(bp, req);
8519 for (i = 0; i < bp->cp_nr_rings; i++) {
8520 struct bnxt_napi *bnapi = bp->bnapi[i];
8521 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8522
8523 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8524
8525 rc = hwrm_req_send(bp, req);
8526 if (rc)
8527 break;
8528
8529 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8530
8531 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8532 }
8533 hwrm_req_drop(bp, req);
8534 return rc;
8535 }
8536
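/* Cache the current function configuration: VF VLAN and trust state,
 * LLDP/DCBX agent flags, bridge (EVB) mode, maximum configured MTU and
 * the doorbell BAR offset and size.
 */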
8537 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8538 {
8539 struct hwrm_func_qcfg_output *resp;
8540 struct hwrm_func_qcfg_input *req;
8541 u16 flags;
8542 int rc;
8543
8544 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8545 if (rc)
8546 return rc;
8547
8548 req->fid = cpu_to_le16(0xffff);
8549 resp = hwrm_req_hold(bp, req);
8550 rc = hwrm_req_send(bp, req);
8551 if (rc)
8552 goto func_qcfg_exit;
8553
8554 flags = le16_to_cpu(resp->flags);
8555 #ifdef CONFIG_BNXT_SRIOV
8556 if (BNXT_VF(bp)) {
8557 struct bnxt_vf_info *vf = &bp->vf;
8558
8559 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8560 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8561 vf->flags |= BNXT_VF_TRUST;
8562 else
8563 vf->flags &= ~BNXT_VF_TRUST;
8564 } else {
8565 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8566 }
8567 #endif
8568 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8569 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8570 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8571 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8572 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8573 }
8574 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8575 bp->flags |= BNXT_FLAG_MULTI_HOST;
8576
8577 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8578 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8579
8580 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8581 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8582 if (resp->roce_bidi_opt_mode &
8583 FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8584 bp->cos0_cos1_shared = 1;
8585 else
8586 bp->cos0_cos1_shared = 0;
8587
8588 switch (resp->port_partition_type) {
8589 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8590 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8591 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8592 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8593 bp->port_partition_type = resp->port_partition_type;
8594 break;
8595 }
8596 if (bp->hwrm_spec_code < 0x10707 ||
8597 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8598 bp->br_mode = BRIDGE_MODE_VEB;
8599 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8600 bp->br_mode = BRIDGE_MODE_VEPA;
8601 else
8602 bp->br_mode = BRIDGE_MODE_UNDEF;
8603
8604 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8605 if (!bp->max_mtu)
8606 bp->max_mtu = BNXT_MAX_MTU;
8607
8608 if (bp->db_size)
8609 goto func_qcfg_exit;
8610
8611 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8612 if (BNXT_CHIP_P5(bp)) {
8613 if (BNXT_PF(bp))
8614 bp->db_offset = DB_PF_OFFSET_P5;
8615 else
8616 bp->db_offset = DB_VF_OFFSET_P5;
8617 }
8618 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8619 1024);
8620 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8621 bp->db_size <= bp->db_offset)
8622 bp->db_size = pci_resource_len(bp->pdev, 2);
8623
8624 func_qcfg_exit:
8625 hwrm_req_drop(bp, req);
8626 return rc;
8627 }
8628
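/* Record how firmware wants this context type initialized. The response
 * reports the init offset in 4-byte units; it is stored here in bytes.
 * Without a valid init mask, the init value is forced to 0.
 */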
8629 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8630 u8 init_val, u8 init_offset,
8631 bool init_mask_set)
8632 {
8633 ctxm->init_value = init_val;
8634 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8635 if (init_mask_set)
8636 ctxm->init_offset = init_offset * 4;
8637 else
8638 ctxm->init_value = 0;
8639 }
8640
8641 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8642 {
8643 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8644 u16 type;
8645
8646 for (type = 0; type < ctx_max; type++) {
8647 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8648 int n = 1;
8649
8650 if (!ctxm->max_entries || ctxm->pg_info)
8651 continue;
8652
8653 if (ctxm->instance_bmap)
8654 n = hweight32(ctxm->instance_bmap);
8655 ctxm->pg_info = kzalloc_objs(*ctxm->pg_info, n);
8656 if (!ctxm->pg_info)
8657 return -ENOMEM;
8658 }
8659 return 0;
8660 }
8661
8662 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8663 struct bnxt_ctx_mem_type *ctxm, bool force);
8664
8665 #define BNXT_CTX_INIT_VALID(flags) \
8666 (!!((flags) & \
8667 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8668
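/* Walk the V2 backing store types reported by firmware, caching entry
 * size, entry limits, split counts and initializer info for each valid
 * type. Memory for types that disappeared or changed geometry is freed.
 */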
8669 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8670 {
8671 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8672 struct hwrm_func_backing_store_qcaps_v2_input *req;
8673 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8674 u16 type;
8675 int rc;
8676
8677 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8678 if (rc)
8679 return rc;
8680
8681 if (!ctx) {
8682 ctx = kzalloc_obj(*ctx);
8683 if (!ctx)
8684 return -ENOMEM;
8685 bp->ctx = ctx;
8686 }
8687
8688 resp = hwrm_req_hold(bp, req);
8689
8690 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8691 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8692 u8 init_val, init_off, i;
8693 u32 max_entries;
8694 u16 entry_size;
8695 __le32 *p;
8696 u32 flags;
8697
8698 req->type = cpu_to_le16(type);
8699 rc = hwrm_req_send(bp, req);
8700 if (rc)
8701 goto ctx_done;
8702 flags = le32_to_cpu(resp->flags);
8703 type = le16_to_cpu(resp->next_valid_type);
8704 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8705 bnxt_free_one_ctx_mem(bp, ctxm, true);
8706 continue;
8707 }
8708 entry_size = le16_to_cpu(resp->entry_size);
8709 max_entries = le32_to_cpu(resp->max_num_entries);
8710 if (ctxm->mem_valid) {
8711 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8712 ctxm->entry_size != entry_size ||
8713 ctxm->max_entries != max_entries)
8714 bnxt_free_one_ctx_mem(bp, ctxm, true);
8715 else
8716 continue;
8717 }
8718 ctxm->type = le16_to_cpu(resp->type);
8719 ctxm->entry_size = entry_size;
8720 ctxm->flags = flags;
8721 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8722 ctxm->entry_multiple = resp->entry_multiple;
8723 ctxm->max_entries = max_entries;
8724 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8725 init_val = resp->ctx_init_value;
8726 init_off = resp->ctx_init_offset;
8727 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8728 BNXT_CTX_INIT_VALID(flags));
8729 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8730 BNXT_MAX_SPLIT_ENTRY);
8731 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8732 i++, p++)
8733 ctxm->split[i] = le32_to_cpu(*p);
8734 }
8735 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8736
8737 ctx_done:
8738 hwrm_req_drop(bp, req);
8739 return rc;
8740 }
8741
8742 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8743 {
8744 struct hwrm_func_backing_store_qcaps_output *resp;
8745 struct hwrm_func_backing_store_qcaps_input *req;
8746 int rc;
8747
8748 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8749 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8750 return 0;
8751
8752 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8753 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8754
8755 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8756 if (rc)
8757 return rc;
8758
8759 resp = hwrm_req_hold(bp, req);
8760 rc = hwrm_req_send_silent(bp, req);
8761 if (!rc) {
8762 struct bnxt_ctx_mem_type *ctxm;
8763 struct bnxt_ctx_mem_info *ctx;
8764 u8 init_val, init_idx = 0;
8765 u16 init_mask;
8766
8767 ctx = bp->ctx;
8768 if (!ctx) {
8769 ctx = kzalloc_obj(*ctx);
8770 if (!ctx) {
8771 rc = -ENOMEM;
8772 goto ctx_err;
8773 }
8774 bp->ctx = ctx;
8775 }
8776 init_val = resp->ctx_kind_initializer;
8777 init_mask = le16_to_cpu(resp->ctx_init_mask);
8778
8779 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8780 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8781 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8782 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8783 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8784 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8785 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8786 (init_mask & (1 << init_idx++)) != 0);
8787
8788 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8789 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8790 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8791 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8792 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8793 (init_mask & (1 << init_idx++)) != 0);
8794
8795 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8796 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8797 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8798 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8799 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8800 (init_mask & (1 << init_idx++)) != 0);
8801
8802 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8803 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8804 ctxm->max_entries = ctxm->vnic_entries +
8805 le16_to_cpu(resp->vnic_max_ring_table_entries);
8806 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8807 bnxt_init_ctx_initializer(ctxm, init_val,
8808 resp->vnic_init_offset,
8809 (init_mask & (1 << init_idx++)) != 0);
8810
8811 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8812 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8813 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8814 bnxt_init_ctx_initializer(ctxm, init_val,
8815 resp->stat_init_offset,
8816 (init_mask & (1 << init_idx++)) != 0);
8817
8818 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8819 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8820 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8821 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8822 ctxm->entry_multiple = resp->tqm_entries_multiple;
8823 if (!ctxm->entry_multiple)
8824 ctxm->entry_multiple = 1;
8825
8826 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8827
8828 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8829 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8830 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8831 ctxm->mrav_num_entries_units =
8832 le16_to_cpu(resp->mrav_num_entries_units);
8833 bnxt_init_ctx_initializer(ctxm, init_val,
8834 resp->mrav_init_offset,
8835 (init_mask & (1 << init_idx++)) != 0);
8836
8837 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8838 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8839 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8840
8841 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8842 if (!ctx->tqm_fp_rings_count)
8843 ctx->tqm_fp_rings_count = bp->max_q;
8844 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8845 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8846
8847 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8848 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8849 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8850
8851 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8852 } else {
8853 rc = 0;
8854 }
8855 ctx_err:
8856 hwrm_req_drop(bp, req);
8857 return rc;
8858 }
8859
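/* Encode the page size, indirection level (0, 1 or 2 level page tables)
 * and the DMA address of the page directory (or of the single data page)
 * into the HWRM request fields.
 */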
8860 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8861 __le64 *pg_dir)
8862 {
8863 if (!rmem->nr_pages)
8864 return;
8865
8866 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8867 if (rmem->depth >= 1) {
8868 if (rmem->depth == 2)
8869 *pg_attr |= 2;
8870 else
8871 *pg_attr |= 1;
8872 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8873 } else {
8874 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8875 }
8876 }
8877
8878 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8879 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8880 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8881 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8882 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8883 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8884
8885 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8886 {
8887 struct hwrm_func_backing_store_cfg_input *req;
8888 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8889 struct bnxt_ctx_pg_info *ctx_pg;
8890 struct bnxt_ctx_mem_type *ctxm;
8891 void **__req = (void **)&req;
8892 u32 req_len = sizeof(*req);
8893 __le32 *num_entries;
8894 __le64 *pg_dir;
8895 u32 flags = 0;
8896 u8 *pg_attr;
8897 u32 ena;
8898 int rc;
8899 int i;
8900
8901 if (!ctx)
8902 return 0;
8903
8904 if (req_len > bp->hwrm_max_ext_req_len)
8905 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8906 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8907 if (rc)
8908 return rc;
8909
8910 req->enables = cpu_to_le32(enables);
8911 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8912 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8913 ctx_pg = ctxm->pg_info;
8914 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8915 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8916 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8917 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8918 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8919 &req->qpc_pg_size_qpc_lvl,
8920 &req->qpc_page_dir);
8921
8922 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8923 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8924 }
8925 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8926 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8927 ctx_pg = ctxm->pg_info;
8928 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8929 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8930 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8931 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8932 &req->srq_pg_size_srq_lvl,
8933 &req->srq_page_dir);
8934 }
8935 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8936 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8937 ctx_pg = ctxm->pg_info;
8938 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8939 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8940 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8941 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8942 &req->cq_pg_size_cq_lvl,
8943 &req->cq_page_dir);
8944 }
8945 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8946 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8947 ctx_pg = ctxm->pg_info;
8948 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8949 req->vnic_num_ring_table_entries =
8950 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8951 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8952 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8953 &req->vnic_pg_size_vnic_lvl,
8954 &req->vnic_page_dir);
8955 }
8956 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8957 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8958 ctx_pg = ctxm->pg_info;
8959 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8960 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8961 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8962 &req->stat_pg_size_stat_lvl,
8963 &req->stat_page_dir);
8964 }
8965 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8966 u32 units;
8967
8968 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8969 ctx_pg = ctxm->pg_info;
8970 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8971 units = ctxm->mrav_num_entries_units;
8972 if (units) {
8973 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8974 u32 entries;
8975
8976 num_mr = ctx_pg->entries - num_ah;
8977 entries = ((num_mr / units) << 16) | (num_ah / units);
8978 req->mrav_num_entries = cpu_to_le32(entries);
8979 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8980 }
8981 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8982 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8983 &req->mrav_pg_size_mrav_lvl,
8984 &req->mrav_page_dir);
8985 }
8986 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8987 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8988 ctx_pg = ctxm->pg_info;
8989 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8990 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8991 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8992 &req->tim_pg_size_tim_lvl,
8993 &req->tim_page_dir);
8994 }
8995 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8996 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8997 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8998 pg_dir = &req->tqm_sp_page_dir,
8999 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
9000 ctx_pg = ctxm->pg_info;
9001 i < BNXT_MAX_TQM_RINGS;
9002 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
9003 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
9004 if (!(enables & ena))
9005 continue;
9006
9007 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
9008 *num_entries = cpu_to_le32(ctx_pg->entries);
9009 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9010 }
9011 req->flags = cpu_to_le32(flags);
9012 return hwrm_req_send(bp, req);
9013 }
9014
9015 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9016 struct bnxt_ctx_pg_info *ctx_pg)
9017 {
9018 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9019
9020 rmem->page_size = BNXT_PAGE_SIZE;
9021 rmem->pg_arr = ctx_pg->ctx_pg_arr;
9022 rmem->dma_arr = ctx_pg->ctx_dma_arr;
9023 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9024 if (rmem->depth >= 1)
9025 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9026 return bnxt_alloc_ring(bp, rmem);
9027 }
9028
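/* Allocate host pages for one context memory region.  Small regions use a
 * single-level ring; regions larger than MAX_CTX_PAGES (or when a deeper
 * page level is requested) are built as a two-level page table.
 */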
9029 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9030 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9031 u8 depth, struct bnxt_ctx_mem_type *ctxm)
9032 {
9033 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9034 int rc;
9035
9036 if (!mem_size)
9037 return -EINVAL;
9038
9039 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9040 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9041 ctx_pg->nr_pages = 0;
9042 return -EINVAL;
9043 }
9044 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9045 int nr_tbls, i;
9046
9047 rmem->depth = 2;
9048 ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9049 if (!ctx_pg->ctx_pg_tbl)
9050 return -ENOMEM;
9051 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9052 rmem->nr_pages = nr_tbls;
9053 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9054 if (rc)
9055 return rc;
9056 for (i = 0; i < nr_tbls; i++) {
9057 struct bnxt_ctx_pg_info *pg_tbl;
9058
9059 pg_tbl = kzalloc_obj(*pg_tbl);
9060 if (!pg_tbl)
9061 return -ENOMEM;
9062 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9063 rmem = &pg_tbl->ring_mem;
9064 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9065 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9066 rmem->depth = 1;
9067 rmem->nr_pages = MAX_CTX_PAGES;
9068 rmem->ctx_mem = ctxm;
9069 if (i == (nr_tbls - 1)) {
9070 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9071
9072 if (rem)
9073 rmem->nr_pages = rem;
9074 }
9075 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9076 if (rc)
9077 break;
9078 }
9079 } else {
9080 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9081 if (rmem->nr_pages > 1 || depth)
9082 rmem->depth = 1;
9083 rmem->ctx_mem = ctxm;
9084 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9085 }
9086 return rc;
9087 }
9088
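/* Copy the circular region [head, tail) of a context memory region into
 * buf at the given offset, walking the second-level page tables when the
 * region uses two-level indirection and wrapping at the end of the region.
 */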
9089 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9090 struct bnxt_ctx_pg_info *ctx_pg,
9091 void *buf, size_t offset, size_t head,
9092 size_t tail)
9093 {
9094 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9095 size_t nr_pages = ctx_pg->nr_pages;
9096 int page_size = rmem->page_size;
9097 size_t len = 0, total_len = 0;
9098 u16 depth = rmem->depth;
9099
9100 tail %= nr_pages * page_size;
9101 do {
9102 if (depth > 1) {
9103 int i = head / (page_size * MAX_CTX_PAGES);
9104 struct bnxt_ctx_pg_info *pg_tbl;
9105
9106 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9107 rmem = &pg_tbl->ring_mem;
9108 }
9109 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9110 head += len;
9111 offset += len;
9112 total_len += len;
9113 if (head >= nr_pages * page_size)
9114 head = 0;
9115 } while (head != tail);
9116 return total_len;
9117 }
9118
9119 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9120 struct bnxt_ctx_pg_info *ctx_pg)
9121 {
9122 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9123
9124 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9125 ctx_pg->ctx_pg_tbl) {
9126 int i, nr_tbls = rmem->nr_pages;
9127
9128 for (i = 0; i < nr_tbls; i++) {
9129 struct bnxt_ctx_pg_info *pg_tbl;
9130 struct bnxt_ring_mem_info *rmem2;
9131
9132 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9133 if (!pg_tbl)
9134 continue;
9135 rmem2 = &pg_tbl->ring_mem;
9136 bnxt_free_ring(bp, rmem2);
9137 ctx_pg->ctx_pg_arr[i] = NULL;
9138 kfree(pg_tbl);
9139 ctx_pg->ctx_pg_tbl[i] = NULL;
9140 }
9141 kfree(ctx_pg->ctx_pg_tbl);
9142 ctx_pg->ctx_pg_tbl = NULL;
9143 }
9144 bnxt_free_ring(bp, rmem);
9145 ctx_pg->nr_pages = 0;
9146 }
9147
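/* Size and allocate the backing pages for one context memory type: round
 * the requested entry count to the required multiple, clamp it to the
 * firmware min/max, and allocate page tables for every instance of the type.
 */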
9148 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9149 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9150 u8 pg_lvl)
9151 {
9152 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9153 int i, rc = 0, n = 1;
9154 u32 mem_size;
9155
9156 if (!ctxm->entry_size || !ctx_pg)
9157 return -EINVAL;
9158 if (ctxm->instance_bmap)
9159 n = hweight32(ctxm->instance_bmap);
9160 if (ctxm->entry_multiple)
9161 entries = roundup(entries, ctxm->entry_multiple);
9162 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9163 mem_size = entries * ctxm->entry_size;
9164 for (i = 0; i < n && !rc; i++) {
9165 ctx_pg[i].entries = entries;
9166 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9167 ctxm->init_value ? ctxm : NULL);
9168 }
9169 if (!rc)
9170 ctxm->mem_valid = 1;
9171 return rc;
9172 }
9173
9174 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9175 struct bnxt_ctx_mem_type *ctxm,
9176 bool last)
9177 {
9178 struct hwrm_func_backing_store_cfg_v2_input *req;
9179 u32 instance_bmap = ctxm->instance_bmap;
9180 int i, j, rc = 0, n = 1;
9181 __le32 *p;
9182
9183 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9184 return 0;
9185
9186 if (instance_bmap)
9187 n = hweight32(ctxm->instance_bmap);
9188 else
9189 instance_bmap = 1;
9190
9191 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9192 if (rc)
9193 return rc;
9194 hwrm_req_hold(bp, req);
9195 req->type = cpu_to_le16(ctxm->type);
9196 req->entry_size = cpu_to_le16(ctxm->entry_size);
9197 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9198 bnxt_bs_trace_avail(bp, ctxm->type)) {
9199 struct bnxt_bs_trace_info *bs_trace;
9200 u32 enables;
9201
9202 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9203 req->enables = cpu_to_le32(enables);
9204 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9205 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9206 }
9207 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9208 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9209 p[i] = cpu_to_le32(ctxm->split[i]);
9210 for (i = 0, j = 0; j < n && !rc; i++) {
9211 struct bnxt_ctx_pg_info *ctx_pg;
9212
9213 if (!(instance_bmap & (1 << i)))
9214 continue;
9215 req->instance = cpu_to_le16(i);
9216 ctx_pg = &ctxm->pg_info[j++];
9217 if (!ctx_pg->entries)
9218 continue;
9219 req->num_entries = cpu_to_le32(ctx_pg->entries);
9220 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9221 &req->page_size_pbl_level,
9222 &req->page_dir);
9223 if (last && j == n)
9224 req->flags =
9225 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9226 rc = hwrm_req_send(bp, req);
9227 }
9228 hwrm_req_drop(bp, req);
9229 return rc;
9230 }
9231
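/* Configure all valid context memory types in firmware using the V2 API.
 * FW trace types are set up first if supported, and the last valid type is
 * flagged so firmware knows the backing store configuration is complete.
 */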
9232 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9233 {
9234 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9235 struct bnxt_ctx_mem_type *ctxm;
9236 u16 last_type = BNXT_CTX_INV;
9237 int rc = 0;
9238 u16 type;
9239
9240 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9241 ctxm = &ctx->ctx_arr[type];
9242 if (!bnxt_bs_trace_avail(bp, type))
9243 continue;
9244 if (!ctxm->mem_valid) {
9245 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9246 ctxm->max_entries, 1);
9247 if (rc) {
9248 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9249 type);
9250 continue;
9251 }
9252 bnxt_bs_trace_init(bp, ctxm);
9253 }
9254 last_type = type;
9255 }
9256
9257 if (last_type == BNXT_CTX_INV) {
9258 for (type = 0; type < BNXT_CTX_MAX; type++) {
9259 ctxm = &ctx->ctx_arr[type];
9260 if (ctxm->mem_valid)
9261 last_type = type;
9262 }
9263 if (last_type == BNXT_CTX_INV)
9264 return 0;
9265 }
9266 ctx->ctx_arr[last_type].last = 1;
9267
9268 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9269 ctxm = &ctx->ctx_arr[type];
9270
9271 if (!ctxm->mem_valid)
9272 continue;
9273 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9274 if (rc)
9275 return rc;
9276 }
9277 return 0;
9278 }
9279
9280 /**
9281 * __bnxt_copy_ctx_mem - copy host context memory
9282 * @bp: The driver context
9283 * @ctxm: The pointer to the context memory type
9284 * @buf: The destination buffer or NULL to just obtain the length
9285 * @offset: The buffer offset to copy the data to
9286 * @head: The head offset of context memory to copy from
9287 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9288 *
9289 * This function is called for debugging purposes to dump the host context
9290 * used by the chip.
9291 *
9292 * Return: Length of memory copied
9293 */
9294 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9295 struct bnxt_ctx_mem_type *ctxm, void *buf,
9296 size_t offset, size_t head, size_t tail)
9297 {
9298 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9299 size_t len = 0, total_len = 0;
9300 int i, n = 1;
9301
9302 if (!ctx_pg)
9303 return 0;
9304
9305 if (ctxm->instance_bmap)
9306 n = hweight32(ctxm->instance_bmap);
9307 for (i = 0; i < n; i++) {
9308 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9309 tail);
9310 offset += len;
9311 total_len += len;
9312 }
9313 return total_len;
9314 }
9315
9316 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9317 void *buf, size_t offset)
9318 {
9319 size_t tail = ctxm->max_entries * ctxm->entry_size;
9320
9321 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9322 }
9323
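/* Free one context memory type.  Types marked persistent (e.g. firmware
 * trace memory) are preserved unless @force is set.
 */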
9324 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9325 struct bnxt_ctx_mem_type *ctxm, bool force)
9326 {
9327 struct bnxt_ctx_pg_info *ctx_pg;
9328 int i, n = 1;
9329
9330 ctxm->last = 0;
9331
9332 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9333 return;
9334
9335 ctx_pg = ctxm->pg_info;
9336 if (ctx_pg) {
9337 if (ctxm->instance_bmap)
9338 n = hweight32(ctxm->instance_bmap);
9339 for (i = 0; i < n; i++)
9340 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9341
9342 kfree(ctx_pg);
9343 ctxm->pg_info = NULL;
9344 ctxm->mem_valid = 0;
9345 }
9346 memset(ctxm, 0, sizeof(*ctxm));
9347 }
9348
9349 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9350 {
9351 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9352 u16 type;
9353
9354 if (!ctx)
9355 return;
9356
9357 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9358 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9359
9360 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9361 if (force) {
9362 kfree(ctx);
9363 bp->ctx = NULL;
9364 }
9365 }
9366
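/* Top-level context memory setup: query the firmware requirements, size the
 * QP, SRQ, CQ, VNIC, stats, TQM, MRAV and TIM regions (with extra entries
 * when RoCE is supported), allocate the backing pages and configure firmware.
 */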
9367 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9368 {
9369 struct bnxt_ctx_mem_type *ctxm;
9370 struct bnxt_ctx_mem_info *ctx;
9371 u32 l2_qps, qp1_qps, max_qps;
9372 u32 ena, entries_sp, entries;
9373 u32 srqs, max_srqs, min;
9374 u32 num_mr, num_ah;
9375 u32 extra_srqs = 0;
9376 u32 extra_qps = 0;
9377 u32 fast_qpmd_qps;
9378 u8 pg_lvl = 1;
9379 int i, rc;
9380
9381 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9382 if (rc) {
9383 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9384 rc);
9385 return rc;
9386 }
9387 ctx = bp->ctx;
9388 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9389 return 0;
9390
9391 ena = 0;
9392 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9393 goto skip_legacy;
9394
9395 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9396 l2_qps = ctxm->qp_l2_entries;
9397 qp1_qps = ctxm->qp_qp1_entries;
9398 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9399 max_qps = ctxm->max_entries;
9400 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9401 srqs = ctxm->srq_l2_entries;
9402 max_srqs = ctxm->max_entries;
9403 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9404 pg_lvl = 2;
9405 if (BNXT_SW_RES_LMT(bp)) {
9406 extra_qps = max_qps - l2_qps - qp1_qps;
9407 extra_srqs = max_srqs - srqs;
9408 } else {
9409 extra_qps = min_t(u32, 65536,
9410 max_qps - l2_qps - qp1_qps);
9411 /* allocate extra qps if fw supports RoCE fast qp
9412 * destroy feature
9413 */
9414 extra_qps += fast_qpmd_qps;
9415 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9416 }
9417 if (fast_qpmd_qps)
9418 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9419 }
9420
9421 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9422 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9423 pg_lvl);
9424 if (rc)
9425 return rc;
9426
9427 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9428 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9429 if (rc)
9430 return rc;
9431
9432 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9433 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9434 extra_qps * 2, pg_lvl);
9435 if (rc)
9436 return rc;
9437
9438 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9439 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9440 if (rc)
9441 return rc;
9442
9443 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9444 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9445 if (rc)
9446 return rc;
9447
9448 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9449 goto skip_rdma;
9450
9451 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9452 if (BNXT_SW_RES_LMT(bp) &&
9453 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9454 num_ah = ctxm->mrav_av_entries;
9455 num_mr = ctxm->max_entries - num_ah;
9456 } else {
9457 /* 128K extra is needed to accommodate static AH context
9458 * allocation by f/w.
9459 */
9460 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9461 num_ah = min_t(u32, num_mr, 1024 * 128);
9462 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9463 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9464 ctxm->mrav_av_entries = num_ah;
9465 }
9466
9467 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9468 if (rc)
9469 return rc;
9470 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9471
9472 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9473 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9474 if (rc)
9475 return rc;
9476 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9477
9478 skip_rdma:
9479 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9480 min = ctxm->min_entries;
9481 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9482 2 * (extra_qps + qp1_qps) + min;
9483 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9484 if (rc)
9485 return rc;
9486
9487 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9488 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9489 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9490 if (rc)
9491 return rc;
9492 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9493 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9494 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9495
9496 skip_legacy:
9497 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9498 rc = bnxt_backing_store_cfg_v2(bp);
9499 else
9500 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9501 if (rc) {
9502 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9503 rc);
9504 return rc;
9505 }
9506 ctx->flags |= BNXT_CTX_FLAG_INITED;
9507 return 0;
9508 }
9509
9510 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9511 {
9512 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9513 u16 page_attr;
9514 int rc;
9515
9516 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9517 return 0;
9518
9519 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9520 if (rc)
9521 return rc;
9522
9523 if (BNXT_PAGE_SIZE == 0x2000)
9524 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9525 else if (BNXT_PAGE_SIZE == 0x10000)
9526 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9527 else
9528 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9529 req->pg_size_lvl = cpu_to_le16(page_attr |
9530 bp->fw_crash_mem->ring_mem.depth);
9531 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9532 req->size = cpu_to_le32(bp->fw_crash_len);
9533 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9534 return hwrm_req_send(bp, req);
9535 }
9536
9537 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9538 {
9539 if (bp->fw_crash_mem) {
9540 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9541 kfree(bp->fw_crash_mem);
9542 bp->fw_crash_mem = NULL;
9543 }
9544 }
9545
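/* Allocate (or reuse) host memory for the firmware crash dump when the
 * firmware supports dumping to host DDR.  Existing pages are kept if they
 * are already large enough for the reported dump length.
 */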
9546 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9547 {
9548 u32 mem_size = 0;
9549 int rc;
9550
9551 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9552 return 0;
9553
9554 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9555 if (rc)
9556 return rc;
9557
9558 mem_size = round_up(mem_size, 4);
9559
9560 /* keep and use the existing pages */
9561 if (bp->fw_crash_mem &&
9562 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9563 goto alloc_done;
9564
9565 if (bp->fw_crash_mem)
9566 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9567 else
9568 bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9569 if (!bp->fw_crash_mem)
9570 return -ENOMEM;
9571
9572 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9573 if (rc) {
9574 bnxt_free_crash_dump_mem(bp);
9575 return rc;
9576 }
9577
9578 alloc_done:
9579 bp->fw_crash_len = mem_size;
9580 return 0;
9581 }
9582
9583 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9584 {
9585 struct hwrm_func_resource_qcaps_output *resp;
9586 struct hwrm_func_resource_qcaps_input *req;
9587 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9588 int rc;
9589
9590 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9591 if (rc)
9592 return rc;
9593
9594 req->fid = cpu_to_le16(0xffff);
9595 resp = hwrm_req_hold(bp, req);
9596 rc = hwrm_req_send_silent(bp, req);
9597 if (rc)
9598 goto hwrm_func_resc_qcaps_exit;
9599
9600 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9601 if (!all)
9602 goto hwrm_func_resc_qcaps_exit;
9603
9604 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9605 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9606 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9607 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9608 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9609 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9610 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9611 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9612 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9613 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9614 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9615 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9616 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9617 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9618 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9619 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9620
9621 if (hw_resc->max_rsscos_ctxs >=
9622 hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9623 bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9624
9625 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9626 u16 max_msix = le16_to_cpu(resp->max_msix);
9627
9628 hw_resc->max_nqs = max_msix;
9629 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9630 }
9631
9632 if (BNXT_PF(bp)) {
9633 struct bnxt_pf_info *pf = &bp->pf;
9634
9635 pf->vf_resv_strategy =
9636 le16_to_cpu(resp->vf_reservation_strategy);
9637 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9638 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9639 }
9640 hwrm_func_resc_qcaps_exit:
9641 hwrm_req_drop(bp, req);
9642 return rc;
9643 }
9644
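/* Query the PTP hardware clock configuration from firmware and allocate
 * bp->ptp_cfg on first use.  The PTP state is cleared and freed if the
 * device or firmware does not support PTP access.
 */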
9645 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9646 {
9647 struct hwrm_port_mac_ptp_qcfg_output *resp;
9648 struct hwrm_port_mac_ptp_qcfg_input *req;
9649 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9650 u8 flags;
9651 int rc;
9652
9653 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9654 rc = -ENODEV;
9655 goto no_ptp;
9656 }
9657
9658 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9659 if (rc)
9660 goto no_ptp;
9661
9662 req->port_id = cpu_to_le16(bp->pf.port_id);
9663 resp = hwrm_req_hold(bp, req);
9664 rc = hwrm_req_send(bp, req);
9665 if (rc)
9666 goto exit;
9667
9668 flags = resp->flags;
9669 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9670 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9671 rc = -ENODEV;
9672 goto exit;
9673 }
9674 if (!ptp) {
9675 ptp = kzalloc_obj(*ptp);
9676 if (!ptp) {
9677 rc = -ENOMEM;
9678 goto exit;
9679 }
9680 ptp->bp = bp;
9681 bp->ptp_cfg = ptp;
9682 }
9683
9684 if (flags &
9685 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9686 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9687 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9688 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9689 } else if (BNXT_CHIP_P5(bp)) {
9690 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9691 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9692 } else {
9693 rc = -ENODEV;
9694 goto exit;
9695 }
9696 ptp->rtc_configured =
9697 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9698 rc = bnxt_ptp_init(bp);
9699 if (rc)
9700 netdev_warn(bp->dev, "PTP initialization failed.\n");
9701 exit:
9702 hwrm_req_drop(bp, req);
9703 if (!rc)
9704 return 0;
9705
9706 no_ptp:
9707 bnxt_ptp_clear(bp);
9708 kfree(ptp);
9709 bp->ptp_cfg = NULL;
9710 return rc;
9711 }
9712
9713 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9714 {
9715 u32 flags, flags_ext, flags_ext2, flags_ext3;
9716 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9717 struct hwrm_func_qcaps_output *resp;
9718 struct hwrm_func_qcaps_input *req;
9719 int rc;
9720
9721 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9722 if (rc)
9723 return rc;
9724
9725 req->fid = cpu_to_le16(0xffff);
9726 resp = hwrm_req_hold(bp, req);
9727 rc = hwrm_req_send(bp, req);
9728 if (rc)
9729 goto hwrm_func_qcaps_exit;
9730
9731 flags = le32_to_cpu(resp->flags);
9732 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9733 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9734 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9735 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9736 if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9737 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9738 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9739 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9740 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9741 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9742 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9743 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9744 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9745 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9746 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9747 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9748 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9749 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9750 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9751 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9752
9753 flags_ext = le32_to_cpu(resp->flags_ext);
9754 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9755 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9756 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9757 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9758 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9759 bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9760 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9761 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9762 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9763 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9764 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9765 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9766 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9767 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9768 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9769 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9770 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9771 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9772 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9773 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9774
9775 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9776 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9777 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9778 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9779 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9780 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9781 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9782 if (flags_ext2 &
9783 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9784 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9785 if (BNXT_PF(bp) &&
9786 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9787 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9788
9789 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9790 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9791 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9792 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9793 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9794
9795 bp->tx_push_thresh = 0;
9796 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9797 BNXT_FW_MAJ(bp) > 217)
9798 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9799
9800 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9801 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9802 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9803 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9804 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9805 if (!hw_resc->max_hw_ring_grps)
9806 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9807 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9808 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9809 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9810
9811 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9812 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9813 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9814 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9815 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9816 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9817
9818 if (BNXT_PF(bp)) {
9819 struct bnxt_pf_info *pf = &bp->pf;
9820
9821 pf->fw_fid = le16_to_cpu(resp->fid);
9822 pf->port_id = le16_to_cpu(resp->port_id);
9823 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9824 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9825 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9826 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9827 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9828 bp->flags |= BNXT_FLAG_WOL_CAP;
9829 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9830 bp->fw_cap |= BNXT_FW_CAP_PTP;
9831 } else {
9832 bnxt_ptp_clear(bp);
9833 kfree(bp->ptp_cfg);
9834 bp->ptp_cfg = NULL;
9835 }
9836 } else {
9837 #ifdef CONFIG_BNXT_SRIOV
9838 struct bnxt_vf_info *vf = &bp->vf;
9839
9840 vf->fw_fid = le16_to_cpu(resp->fid);
9841 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9842 #endif
9843 }
9844 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9845
9846 hwrm_func_qcaps_exit:
9847 hwrm_req_drop(bp, req);
9848 return rc;
9849 }
9850
9851 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9852 {
9853 struct hwrm_dbg_qcaps_output *resp;
9854 struct hwrm_dbg_qcaps_input *req;
9855 int rc;
9856
9857 bp->fw_dbg_cap = 0;
9858 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9859 return;
9860
9861 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9862 if (rc)
9863 return;
9864
9865 req->fid = cpu_to_le16(0xffff);
9866 resp = hwrm_req_hold(bp, req);
9867 rc = hwrm_req_send(bp, req);
9868 if (rc)
9869 goto hwrm_dbg_qcaps_exit;
9870
9871 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9872
9873 hwrm_dbg_qcaps_exit:
9874 hwrm_req_drop(bp, req);
9875 }
9876
9877 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9878
9879 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9880 {
9881 int rc;
9882
9883 rc = __bnxt_hwrm_func_qcaps(bp);
9884 if (rc)
9885 return rc;
9886
9887 bnxt_hwrm_dbg_qcaps(bp);
9888
9889 rc = bnxt_hwrm_queue_qportcfg(bp);
9890 if (rc) {
9891 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9892 return rc;
9893 }
9894 if (bp->hwrm_spec_code >= 0x10803) {
9895 rc = bnxt_alloc_ctx_mem(bp);
9896 if (rc)
9897 return rc;
9898 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9899 if (!rc)
9900 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9901 }
9902 return 0;
9903 }
9904
9905 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9906 {
9907 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9908 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9909 u32 flags;
9910 int rc;
9911
9912 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9913 return 0;
9914
9915 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9916 if (rc)
9917 return rc;
9918
9919 resp = hwrm_req_hold(bp, req);
9920 rc = hwrm_req_send(bp, req);
9921 if (rc)
9922 goto hwrm_cfa_adv_qcaps_exit;
9923
9924 flags = le32_to_cpu(resp->flags);
9925 if (flags &
9926 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9927 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9928
9929 if (flags &
9930 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9931 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9932
9933 if (flags &
9934 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9935 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9936
9937 hwrm_cfa_adv_qcaps_exit:
9938 hwrm_req_drop(bp, req);
9939 return rc;
9940 }
9941
9942 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9943 {
9944 if (bp->fw_health)
9945 return 0;
9946
9947 bp->fw_health = kzalloc_obj(*bp->fw_health);
9948 if (!bp->fw_health)
9949 return -ENOMEM;
9950
9951 mutex_init(&bp->fw_health->lock);
9952 return 0;
9953 }
9954
9955 static int bnxt_alloc_fw_health(struct bnxt *bp)
9956 {
9957 int rc;
9958
9959 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9960 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9961 return 0;
9962
9963 rc = __bnxt_alloc_fw_health(bp);
9964 if (rc) {
9965 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9966 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9967 return rc;
9968 }
9969
9970 return 0;
9971 }
9972
9973 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9974 {
9975 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9976 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9977 BNXT_FW_HEALTH_WIN_MAP_OFF);
9978 }
9979
9980 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9981 {
9982 struct bnxt_fw_health *fw_health = bp->fw_health;
9983 u32 reg_type;
9984
9985 if (!fw_health)
9986 return;
9987
9988 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9989 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9990 fw_health->status_reliable = false;
9991
9992 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9993 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9994 fw_health->resets_reliable = false;
9995 }
9996
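/* Locate the firmware health status register.  Try the hcomm_status
 * structure first; if its signature is not found, fall back to the fixed
 * GRC status register on P5_PLUS chips.
 */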
9997 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9998 {
9999 void __iomem *hs;
10000 u32 status_loc;
10001 u32 reg_type;
10002 u32 sig;
10003
10004 if (bp->fw_health)
10005 bp->fw_health->status_reliable = false;
10006
10007 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
10008 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10009
10010 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10011 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10012 if (!bp->chip_num) {
10013 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10014 bp->chip_num = readl(bp->bar0 +
10015 BNXT_FW_HEALTH_WIN_BASE +
10016 BNXT_GRC_REG_CHIP_NUM);
10017 }
10018 if (!BNXT_CHIP_P5_PLUS(bp))
10019 return;
10020
10021 status_loc = BNXT_GRC_REG_STATUS_P5 |
10022 BNXT_FW_HEALTH_REG_TYPE_BAR0;
10023 } else {
10024 status_loc = readl(hs + offsetof(struct hcomm_status,
10025 fw_status_loc));
10026 }
10027
10028 if (__bnxt_alloc_fw_health(bp)) {
10029 netdev_warn(bp->dev, "no memory for firmware status checks\n");
10030 return;
10031 }
10032
10033 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10034 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10035 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10036 __bnxt_map_fw_health_reg(bp, status_loc);
10037 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10038 BNXT_FW_HEALTH_WIN_OFF(status_loc);
10039 }
10040
10041 bp->fw_health->status_reliable = true;
10042 }
10043
10044 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10045 {
10046 struct bnxt_fw_health *fw_health = bp->fw_health;
10047 u32 reg_base = 0xffffffff;
10048 int i;
10049
10050 bp->fw_health->status_reliable = false;
10051 bp->fw_health->resets_reliable = false;
10052 /* Only pre-map the monitoring GRC registers using window 3 */
10053 for (i = 0; i < 4; i++) {
10054 u32 reg = fw_health->regs[i];
10055
10056 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10057 continue;
10058 if (reg_base == 0xffffffff)
10059 reg_base = reg & BNXT_GRC_BASE_MASK;
10060 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10061 return -ERANGE;
10062 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10063 }
10064 bp->fw_health->status_reliable = true;
10065 bp->fw_health->resets_reliable = true;
10066 if (reg_base == 0xffffffff)
10067 return 0;
10068
10069 __bnxt_map_fw_health_reg(bp, reg_base);
10070 return 0;
10071 }
10072
10073 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10074 {
10075 if (!bp->fw_health)
10076 return;
10077
10078 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10079 bp->fw_health->status_reliable = true;
10080 bp->fw_health->resets_reliable = true;
10081 } else {
10082 bnxt_try_map_fw_health_reg(bp);
10083 }
10084 }
10085
10086 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10087 {
10088 struct bnxt_fw_health *fw_health = bp->fw_health;
10089 struct hwrm_error_recovery_qcfg_output *resp;
10090 struct hwrm_error_recovery_qcfg_input *req;
10091 int rc, i;
10092
10093 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10094 return 0;
10095
10096 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10097 if (rc)
10098 return rc;
10099
10100 resp = hwrm_req_hold(bp, req);
10101 rc = hwrm_req_send(bp, req);
10102 if (rc)
10103 goto err_recovery_out;
10104 fw_health->flags = le32_to_cpu(resp->flags);
10105 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10106 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10107 rc = -EINVAL;
10108 goto err_recovery_out;
10109 }
10110 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10111 fw_health->master_func_wait_dsecs =
10112 le32_to_cpu(resp->master_func_wait_period);
10113 fw_health->normal_func_wait_dsecs =
10114 le32_to_cpu(resp->normal_func_wait_period);
10115 fw_health->post_reset_wait_dsecs =
10116 le32_to_cpu(resp->master_func_wait_period_after_reset);
10117 fw_health->post_reset_max_wait_dsecs =
10118 le32_to_cpu(resp->max_bailout_time_after_reset);
10119 fw_health->regs[BNXT_FW_HEALTH_REG] =
10120 le32_to_cpu(resp->fw_health_status_reg);
10121 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10122 le32_to_cpu(resp->fw_heartbeat_reg);
10123 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10124 le32_to_cpu(resp->fw_reset_cnt_reg);
10125 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10126 le32_to_cpu(resp->reset_inprogress_reg);
10127 fw_health->fw_reset_inprog_reg_mask =
10128 le32_to_cpu(resp->reset_inprogress_reg_mask);
10129 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10130 if (fw_health->fw_reset_seq_cnt >= 16) {
10131 rc = -EINVAL;
10132 goto err_recovery_out;
10133 }
10134 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10135 fw_health->fw_reset_seq_regs[i] =
10136 le32_to_cpu(resp->reset_reg[i]);
10137 fw_health->fw_reset_seq_vals[i] =
10138 le32_to_cpu(resp->reset_reg_val[i]);
10139 fw_health->fw_reset_seq_delay_msec[i] =
10140 resp->delay_after_reset[i];
10141 }
10142 err_recovery_out:
10143 hwrm_req_drop(bp, req);
10144 if (!rc)
10145 rc = bnxt_map_fw_health_regs(bp);
10146 if (rc)
10147 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10148 return rc;
10149 }
10150
10151 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10152 {
10153 struct hwrm_func_reset_input *req;
10154 int rc;
10155
10156 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10157 if (rc)
10158 return rc;
10159
10160 req->enables = 0;
10161 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10162 return hwrm_req_send(bp, req);
10163 }
10164
10165 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10166 {
10167 struct hwrm_nvm_get_dev_info_output nvm_info;
10168
10169 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10170 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10171 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10172 nvm_info.nvm_cfg_ver_upd);
10173 }
10174
10175 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10176 {
10177 struct hwrm_queue_qportcfg_output *resp;
10178 struct hwrm_queue_qportcfg_input *req;
10179 u8 i, j, *qptr;
10180 bool no_rdma;
10181 int rc = 0;
10182
10183 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10184 if (rc)
10185 return rc;
10186
10187 resp = hwrm_req_hold(bp, req);
10188 rc = hwrm_req_send(bp, req);
10189 if (rc)
10190 goto qportcfg_exit;
10191
10192 if (!resp->max_configurable_queues) {
10193 rc = -EINVAL;
10194 goto qportcfg_exit;
10195 }
10196 bp->max_tc = resp->max_configurable_queues;
10197 bp->max_lltc = resp->max_configurable_lossless_queues;
10198 if (bp->max_tc > BNXT_MAX_QUEUE)
10199 bp->max_tc = BNXT_MAX_QUEUE;
10200
10201 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10202 qptr = &resp->queue_id0;
10203 for (i = 0, j = 0; i < bp->max_tc; i++) {
10204 bp->q_info[j].queue_id = *qptr;
10205 bp->q_ids[i] = *qptr++;
10206 bp->q_info[j].queue_profile = *qptr++;
10207 bp->tc_to_qidx[j] = j;
10208 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10209 (no_rdma && BNXT_PF(bp)))
10210 j++;
10211 }
10212 bp->max_q = bp->max_tc;
10213 bp->max_tc = max_t(u8, j, 1);
10214
10215 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10216 bp->max_tc = 1;
10217
10218 if (bp->max_lltc > bp->max_tc)
10219 bp->max_lltc = bp->max_tc;
10220
10221 qportcfg_exit:
10222 hwrm_req_drop(bp, req);
10223 return rc;
10224 }
10225
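/* Silently poll firmware with a VER_GET request (full wait) to check that
 * the firmware is alive and responding to HWRM commands.
 */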
10226 static int bnxt_hwrm_poll(struct bnxt *bp)
10227 {
10228 struct hwrm_ver_get_input *req;
10229 int rc;
10230
10231 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10232 if (rc)
10233 return rc;
10234
10235 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10236 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10237 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10238
10239 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10240 rc = hwrm_req_send(bp, req);
10241 return rc;
10242 }
10243
10244 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10245 {
10246 struct hwrm_ver_get_output *resp;
10247 struct hwrm_ver_get_input *req;
10248 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10249 u32 dev_caps_cfg, hwrm_ver;
10250 int rc, len, max_tmo_secs;
10251
10252 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10253 if (rc)
10254 return rc;
10255
10256 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10257 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10258 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10259 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10260 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10261
10262 resp = hwrm_req_hold(bp, req);
10263 rc = hwrm_req_send(bp, req);
10264 if (rc)
10265 goto hwrm_ver_get_exit;
10266
10267 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10268
10269 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10270 resp->hwrm_intf_min_8b << 8 |
10271 resp->hwrm_intf_upd_8b;
10272 if (resp->hwrm_intf_maj_8b < 1) {
10273 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10274 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10275 resp->hwrm_intf_upd_8b);
10276 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10277 }
10278
10279 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10280 HWRM_VERSION_UPDATE;
10281
10282 if (bp->hwrm_spec_code > hwrm_ver)
10283 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10284 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10285 HWRM_VERSION_UPDATE);
10286 else
10287 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10288 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10289 resp->hwrm_intf_upd_8b);
10290
10291 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10292 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10293 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10294 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10295 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10296 len = FW_VER_STR_LEN;
10297 } else {
10298 fw_maj = resp->hwrm_fw_maj_8b;
10299 fw_min = resp->hwrm_fw_min_8b;
10300 fw_bld = resp->hwrm_fw_bld_8b;
10301 fw_rsv = resp->hwrm_fw_rsvd_8b;
10302 len = BC_HWRM_STR_LEN;
10303 }
10304 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10305 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10306 fw_rsv);
10307
10308 if (strlen(resp->active_pkg_name)) {
10309 int fw_ver_len = strlen(bp->fw_ver_str);
10310
10311 snprintf(bp->fw_ver_str + fw_ver_len,
10312 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10313 resp->active_pkg_name);
10314 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10315 }
10316
10317 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10318 if (!bp->hwrm_cmd_timeout)
10319 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10320 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10321 if (!bp->hwrm_cmd_max_timeout)
10322 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10323 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10324 #ifdef CONFIG_DETECT_HUNG_TASK
10325 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10326 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10327 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10328 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10329 }
10330 #endif
10331
10332 if (resp->hwrm_intf_maj_8b >= 1) {
10333 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10334 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10335 }
10336 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10337 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10338
10339 bp->chip_num = le16_to_cpu(resp->chip_num);
10340 bp->chip_rev = resp->chip_rev;
10341 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10342 !resp->chip_metal)
10343 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10344
10345 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10346 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10347 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10348 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10349
10350 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10351 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10352
10353 if (dev_caps_cfg &
10354 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10355 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10356
10357 if (dev_caps_cfg &
10358 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10359 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10360
10361 if (dev_caps_cfg &
10362 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10363 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10364
10365 hwrm_ver_get_exit:
10366 hwrm_req_drop(bp, req);
10367 return rc;
10368 }
10369
10370 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10371 {
10372 struct hwrm_fw_set_time_input *req;
10373 struct tm tm;
10374 time64_t now = ktime_get_real_seconds();
10375 int rc;
10376
10377 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10378 bp->hwrm_spec_code < 0x10400)
10379 return -EOPNOTSUPP;
10380
10381 time64_to_tm(now, 0, &tm);
10382 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10383 if (rc)
10384 return rc;
10385
10386 req->year = cpu_to_le16(1900 + tm.tm_year);
10387 req->month = 1 + tm.tm_mon;
10388 req->day = tm.tm_mday;
10389 req->hour = tm.tm_hour;
10390 req->minute = tm.tm_min;
10391 req->second = tm.tm_sec;
10392 return hwrm_req_send(bp, req);
10393 }
10394
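/* Accumulate a hardware counter that may be narrower than 64 bits into the
 * 64-bit software counter: merge the masked hardware value and add one full
 * wrap (mask + 1) whenever the hardware value has rolled over.
 */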
10395 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10396 {
10397 u64 sw_tmp;
10398
10399 hw &= mask;
10400 sw_tmp = (*sw & ~mask) | hw;
10401 if (hw < (*sw & mask))
10402 sw_tmp += mask + 1;
10403 WRITE_ONCE(*sw, sw_tmp);
10404 }
10405
10406 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10407 int count, bool ignore_zero)
10408 {
10409 int i;
10410
10411 for (i = 0; i < count; i++) {
10412 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10413
10414 if (ignore_zero && !hw)
10415 continue;
10416
10417 if (masks[i] == -1ULL)
10418 sw_stats[i] = hw;
10419 else
10420 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10421 }
10422 }
10423
10424 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10425 {
10426 if (!stats->hw_stats)
10427 return;
10428
10429 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10430 stats->hw_masks, stats->len / 8, false);
10431 }
10432
10433 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10434 {
10435 struct bnxt_stats_mem *ring0_stats;
10436 bool ignore_zero = false;
10437 int i;
10438
10439 /* Chip bug. Counter intermittently becomes 0. */
10440 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10441 ignore_zero = true;
10442
10443 for (i = 0; i < bp->cp_nr_rings; i++) {
10444 struct bnxt_napi *bnapi = bp->bnapi[i];
10445 struct bnxt_cp_ring_info *cpr;
10446 struct bnxt_stats_mem *stats;
10447
10448 cpr = &bnapi->cp_ring;
10449 stats = &cpr->stats;
10450 if (!i)
10451 ring0_stats = stats;
10452 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10453 ring0_stats->hw_masks,
10454 ring0_stats->len / 8, ignore_zero);
10455 }
10456 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10457 struct bnxt_stats_mem *stats = &bp->port_stats;
10458 __le64 *hw_stats = stats->hw_stats;
10459 u64 *sw_stats = stats->sw_stats;
10460 u64 *masks = stats->hw_masks;
10461 int cnt;
10462
10463 cnt = sizeof(struct rx_port_stats) / 8;
10464 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10465
10466 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10467 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10468 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10469 cnt = sizeof(struct tx_port_stats) / 8;
10470 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10471 }
10472 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10473 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10474 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10475 }
10476 }
10477
10478 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10479 {
10480 struct hwrm_port_qstats_input *req;
10481 struct bnxt_pf_info *pf = &bp->pf;
10482 int rc;
10483
10484 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10485 return 0;
10486
10487 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10488 return -EOPNOTSUPP;
10489
10490 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10491 if (rc)
10492 return rc;
10493
10494 req->flags = flags;
10495 req->port_id = cpu_to_le16(pf->port_id);
10496 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10497 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10498 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10499 return hwrm_req_send(bp, req);
10500 }
10501
10502 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10503 {
10504 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10505 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10506 struct hwrm_port_qstats_ext_output *resp_qs;
10507 struct hwrm_port_qstats_ext_input *req_qs;
10508 struct bnxt_pf_info *pf = &bp->pf;
10509 u32 tx_stat_size;
10510 int rc;
10511
10512 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10513 return 0;
10514
10515 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10516 return -EOPNOTSUPP;
10517
10518 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10519 if (rc)
10520 return rc;
10521
10522 req_qs->flags = flags;
10523 req_qs->port_id = cpu_to_le16(pf->port_id);
10524 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10525 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10526 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10527 sizeof(struct tx_port_stats_ext) : 0;
10528 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10529 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10530 resp_qs = hwrm_req_hold(bp, req_qs);
10531 rc = hwrm_req_send(bp, req_qs);
10532 if (!rc) {
10533 bp->fw_rx_stats_ext_size =
10534 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10535 if (BNXT_FW_MAJ(bp) < 220 &&
10536 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10537 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10538
10539 bp->fw_tx_stats_ext_size = tx_stat_size ?
10540 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10541 } else {
10542 bp->fw_rx_stats_ext_size = 0;
10543 bp->fw_tx_stats_ext_size = 0;
10544 }
10545 hwrm_req_drop(bp, req_qs);
10546
10547 if (flags)
10548 return rc;
10549
10550 if (bp->fw_tx_stats_ext_size <=
10551 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10552 bp->pri2cos_valid = 0;
10553 return rc;
10554 }
10555
10556 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10557 if (rc)
10558 return rc;
10559
10560 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10561
10562 resp_qc = hwrm_req_hold(bp, req_qc);
10563 rc = hwrm_req_send(bp, req_qc);
10564 if (!rc) {
10565 u8 *pri2cos;
10566 int i, j;
10567
10568 pri2cos = &resp_qc->pri0_cos_queue_id;
10569 for (i = 0; i < 8; i++) {
10570 u8 queue_id = pri2cos[i];
10571 u8 queue_idx;
10572
10573 /* Per port queue IDs start from 0, 10, 20, etc */
10574 queue_idx = queue_id % 10;
10575 if (queue_idx > BNXT_MAX_QUEUE) {
10576 bp->pri2cos_valid = false;
10577 hwrm_req_drop(bp, req_qc);
10578 return rc;
10579 }
10580 for (j = 0; j < bp->max_q; j++) {
10581 if (bp->q_ids[j] == queue_id)
10582 bp->pri2cos_idx[i] = queue_idx;
10583 }
10584 }
10585 bp->pri2cos_valid = true;
10586 }
10587 hwrm_req_drop(bp, req_qc);
10588
10589 return rc;
10590 }
10591
10592 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10593 {
10594 bnxt_hwrm_tunnel_dst_port_free(bp,
10595 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10596 bnxt_hwrm_tunnel_dst_port_free(bp,
10597 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10598 }
10599
10600 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10601 {
10602 int rc, i;
10603 u32 tpa_flags = 0;
10604
10605 if (set_tpa)
10606 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10607 else if (BNXT_NO_FW_ACCESS(bp))
10608 return 0;
10609 for (i = 0; i < bp->nr_vnics; i++) {
10610 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10611 if (rc) {
10612 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10613 i, rc);
10614 return rc;
10615 }
10616 }
10617 return 0;
10618 }
10619
10620 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10621 {
10622 int i;
10623
10624 for (i = 0; i < bp->nr_vnics; i++)
10625 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10626 }
10627
10628 static void bnxt_clear_vnic(struct bnxt *bp)
10629 {
10630 if (!bp->vnic_info)
10631 return;
10632
10633 bnxt_hwrm_clear_vnic_filter(bp);
10634 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10635 /* clear all RSS settings before freeing the vnic contexts */
10636 bnxt_hwrm_clear_vnic_rss(bp);
10637 bnxt_hwrm_vnic_ctx_free(bp);
10638 }
10639 /* before freeing the vnic, undo the vnic TPA settings */
10640 if (bp->flags & BNXT_FLAG_TPA)
10641 bnxt_set_tpa(bp, false);
10642 bnxt_hwrm_vnic_free(bp);
10643 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10644 bnxt_hwrm_vnic_ctx_free(bp);
10645 }
10646
10647 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10648 bool irq_re_init)
10649 {
10650 bnxt_clear_vnic(bp);
10651 bnxt_hwrm_ring_free(bp, close_path);
10652 bnxt_hwrm_ring_grp_free(bp);
10653 if (irq_re_init) {
10654 bnxt_hwrm_stat_ctx_free(bp);
10655 bnxt_hwrm_free_tunnel_ports(bp);
10656 }
10657 }
10658
10659 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10660 {
10661 struct hwrm_func_cfg_input *req;
10662 u8 evb_mode;
10663 int rc;
10664
10665 if (br_mode == BRIDGE_MODE_VEB)
10666 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10667 else if (br_mode == BRIDGE_MODE_VEPA)
10668 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10669 else
10670 return -EINVAL;
10671
10672 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10673 if (rc)
10674 return rc;
10675
10676 req->fid = cpu_to_le16(0xffff);
10677 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10678 req->evb_mode = evb_mode;
10679 return hwrm_req_send(bp, req);
10680 }
10681
10682 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10683 {
10684 struct hwrm_func_cfg_input *req;
10685 int rc;
10686
10687 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10688 return 0;
10689
10690 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10691 if (rc)
10692 return rc;
10693
10694 req->fid = cpu_to_le16(0xffff);
10695 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10696 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10697 if (size == 128)
10698 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10699
10700 return hwrm_req_send(bp, req);
10701 }
10702
10703 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10704 {
10705 int rc;
10706
10707 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10708 goto skip_rss_ctx;
10709
10710 /* allocate context for vnic */
10711 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10712 if (rc) {
10713 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10714 vnic->vnic_id, rc);
10715 goto vnic_setup_err;
10716 }
10717 bp->rsscos_nr_ctxs++;
10718
10719 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10720 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10721 if (rc) {
10722 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10723 vnic->vnic_id, rc);
10724 goto vnic_setup_err;
10725 }
10726 bp->rsscos_nr_ctxs++;
10727 }
10728
10729 skip_rss_ctx:
10730 /* configure default vnic, ring grp */
10731 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10732 if (rc) {
10733 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10734 vnic->vnic_id, rc);
10735 goto vnic_setup_err;
10736 }
10737
10738 /* Enable RSS hashing on vnic */
10739 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10740 if (rc) {
10741 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10742 vnic->vnic_id, rc);
10743 goto vnic_setup_err;
10744 }
10745
10746 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10747 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10748 if (rc) {
10749 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10750 vnic->vnic_id, rc);
10751 }
10752 }
10753
10754 vnic_setup_err:
10755 return rc;
10756 }
10757
10758 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10759 u8 valid)
10760 {
10761 struct hwrm_vnic_update_input *req;
10762 int rc;
10763
10764 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10765 if (rc)
10766 return rc;
10767
10768 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10769
10770 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10771 req->mru = cpu_to_le16(vnic->mru);
10772
10773 req->enables = cpu_to_le32(valid);
10774
10775 return hwrm_req_send(bp, req);
10776 }
10777
10778 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10779 {
10780 int rc;
10781
10782 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10783 if (rc) {
10784 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10785 vnic->vnic_id, rc);
10786 return rc;
10787 }
10788 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10789 if (rc)
10790 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10791 vnic->vnic_id, rc);
10792 return rc;
10793 }
10794
10795 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10796 {
10797 int rc, i, nr_ctxs;
10798
10799 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10800 for (i = 0; i < nr_ctxs; i++) {
10801 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10802 if (rc) {
10803 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10804 vnic->vnic_id, i, rc);
10805 break;
10806 }
10807 bp->rsscos_nr_ctxs++;
10808 }
10809 if (i < nr_ctxs)
10810 return -ENOMEM;
10811
10812 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10813 if (rc)
10814 return rc;
10815
10816 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10817 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10818 if (rc) {
10819 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10820 vnic->vnic_id, rc);
10821 }
10822 }
10823 return rc;
10824 }
10825
10826 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10827 {
10828 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10829 return __bnxt_setup_vnic_p5(bp, vnic);
10830 else
10831 return __bnxt_setup_vnic(bp, vnic);
10832 }
10833
10834 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10835 struct bnxt_vnic_info *vnic,
10836 u16 start_rx_ring_idx, int rx_rings)
10837 {
10838 int rc;
10839
10840 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10841 if (rc) {
10842 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10843 vnic->vnic_id, rc);
10844 return rc;
10845 }
10846 return bnxt_setup_vnic(bp, vnic);
10847 }
10848
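/* Set up the VNICs used for receive flow steering.  Devices with a dedicated
 * ntuple VNIC use one VNIC spanning all RX rings; on other P5_PLUS chips
 * there is nothing more to allocate here; legacy chips get one single-ring
 * VNIC per RX ring in addition to the default VNIC.
 */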
10849 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10850 {
10851 struct bnxt_vnic_info *vnic;
10852 int i, rc = 0;
10853
10854 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10855 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10856 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10857 }
10858
10859 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10860 return 0;
10861
10862 for (i = 0; i < bp->rx_nr_rings; i++) {
10863 u16 vnic_id = i + 1;
10864 u16 ring_id = i;
10865
10866 if (vnic_id >= bp->nr_vnics)
10867 break;
10868
10869 vnic = &bp->vnic_info[vnic_id];
10870 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10871 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10872 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10873 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10874 break;
10875 }
10876 return rc;
10877 }
10878
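/* Tear down one user-created RSS context: free its VNIC and RSS/COS contexts
 * in firmware and, when @all is set, also remove any user ntuple filters that
 * target this context and release its DMA'd RSS table.
 */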
10879 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10880 bool all)
10881 {
10882 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10883 struct bnxt_filter_base *usr_fltr, *tmp;
10884 struct bnxt_ntuple_filter *ntp_fltr;
10885 int i;
10886
10887 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10888 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10889 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10890 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10891 }
10892 if (!all)
10893 return;
10894
10895 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10896 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10897 usr_fltr->fw_vnic_id == rss_ctx->index) {
10898 ntp_fltr = container_of(usr_fltr,
10899 struct bnxt_ntuple_filter,
10900 base);
10901 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10902 bnxt_del_ntp_filter(bp, ntp_fltr);
10903 bnxt_del_one_usr_fltr(bp, usr_fltr);
10904 }
10905 }
10906
10907 if (vnic->rss_table)
10908 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10909 vnic->rss_table,
10910 vnic->rss_table_dma_addr);
10911 bp->num_rss_ctx--;
10912 }
10913
10914 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10915 int rxr_id)
10916 {
10917 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10918 int i, vnic_rx;
10919
10920 /* The ntuple VNIC always includes all the rx rings.  Any ring id
10921  * change must be applied to it because a future filter may use it.
10922  */
10923 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10924 return true;
10925
10926 for (i = 0; i < tbl_size; i++) {
10927 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10928 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10929 else
10930 vnic_rx = bp->rss_indir_tbl[i];
10931
10932 if (rxr_id == vnic_rx)
10933 return true;
10934 }
10935
10936 return false;
10937 }
10938
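/* Update the MRU of a VNIC that uses RX ring @rxr_id (used during a queue
 * stop/restart on P5+ chips).  A non-zero @mru re-programs the RSS table
 * first so firmware sees a consistent ring set; a zero @mru appears to be
 * used to quiesce the VNIC while the ring is down.
 */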
10939 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10940 u16 mru, int rxr_id)
10941 {
10942 int rc;
10943
10944 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10945 return 0;
10946
10947 if (mru) {
10948 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10949 if (rc) {
10950 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10951 vnic->vnic_id, rc);
10952 return rc;
10953 }
10954 }
10955 vnic->mru = mru;
10956 bnxt_hwrm_vnic_update(bp, vnic,
10957 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10958
10959 return 0;
10960 }
10961
10962 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10963 {
10964 struct ethtool_rxfh_context *ctx;
10965 unsigned long context;
10966 int rc;
10967
10968 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10969 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10970 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10971
10972 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10973 if (rc)
10974 return rc;
10975 }
10976
10977 return 0;
10978 }
10979
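/* Re-create the firmware VNICs backing all user RSS contexts after a reset or
 * reconfiguration; contexts that cannot be restored are torn down and
 * reported as lost to ethtool.
 */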
10980 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10981 {
10982 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10983 struct ethtool_rxfh_context *ctx;
10984 unsigned long context;
10985
10986 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10987 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10988 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10989
10990 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10991 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10992 __bnxt_setup_vnic_p5(bp, vnic)) {
10993 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10994 rss_ctx->index);
10995 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10996 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10997 }
10998 }
10999 }
11000
11001 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
11002 {
11003 struct ethtool_rxfh_context *ctx;
11004 unsigned long context;
11005
11006 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11007 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11008
11009 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11010 }
11011 }
11012
11013 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11014 static bool bnxt_promisc_ok(struct bnxt *bp)
11015 {
11016 #ifdef CONFIG_BNXT_SRIOV
11017 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11018 return false;
11019 #endif
11020 return true;
11021 }
11022
11023 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11024 {
11025 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11026 unsigned int rc = 0;
11027
11028 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11029 if (rc) {
11030 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11031 rc);
11032 return rc;
11033 }
11034
11035 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11036 if (rc) {
11037 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11038 rc);
11039 return rc;
11040 }
11041 return rc;
11042 }
11043
11044 static int bnxt_cfg_rx_mode(struct bnxt *);
11045 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
11046
11047 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11048 {
11049 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11050 int rc = 0;
11051 unsigned int rx_nr_rings = bp->rx_nr_rings;
11052
11053 if (irq_re_init) {
11054 rc = bnxt_hwrm_stat_ctx_alloc(bp);
11055 if (rc) {
11056 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11057 rc);
11058 goto err_out;
11059 }
11060 }
11061
11062 rc = bnxt_hwrm_ring_alloc(bp);
11063 if (rc) {
11064 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11065 goto err_out;
11066 }
11067
11068 rc = bnxt_hwrm_ring_grp_alloc(bp);
11069 if (rc) {
11070 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11071 goto err_out;
11072 }
11073
11074 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11075 rx_nr_rings--;
11076
11077 /* default vnic 0 */
11078 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11079 if (rc) {
11080 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11081 goto err_out;
11082 }
11083
11084 if (BNXT_VF(bp))
11085 bnxt_hwrm_func_qcfg(bp);
11086
11087 rc = bnxt_setup_vnic(bp, vnic);
11088 if (rc)
11089 goto err_out;
11090 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11091 bnxt_hwrm_update_rss_hash_cfg(bp);
11092
11093 if (bp->flags & BNXT_FLAG_RFS) {
11094 rc = bnxt_alloc_rfs_vnics(bp);
11095 if (rc)
11096 goto err_out;
11097 }
11098
11099 if (bp->flags & BNXT_FLAG_TPA) {
11100 rc = bnxt_set_tpa(bp, true);
11101 if (rc)
11102 goto err_out;
11103 }
11104
11105 if (BNXT_VF(bp))
11106 bnxt_update_vf_mac(bp);
11107
11108 /* Filter for default vnic 0 */
11109 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11110 if (rc) {
11111 if (BNXT_VF(bp) && rc == -ENODEV)
11112 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11113 else
11114 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11115 goto err_out;
11116 }
11117 vnic->uc_filter_count = 1;
11118
11119 vnic->rx_mask = 0;
11120 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11121 goto skip_rx_mask;
11122
11123 if (bp->dev->flags & IFF_BROADCAST)
11124 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11125
11126 if (bp->dev->flags & IFF_PROMISC)
11127 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11128
11129 if (bp->dev->flags & IFF_ALLMULTI) {
11130 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11131 vnic->mc_list_count = 0;
11132 } else if (bp->dev->flags & IFF_MULTICAST) {
11133 u32 mask = 0;
11134
11135 bnxt_mc_list_updated(bp, &mask);
11136 vnic->rx_mask |= mask;
11137 }
11138
11139 rc = bnxt_cfg_rx_mode(bp);
11140 if (rc)
11141 goto err_out;
11142
11143 skip_rx_mask:
11144 rc = bnxt_hwrm_set_coal(bp);
11145 if (rc)
11146 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11147 rc);
11148
11149 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11150 rc = bnxt_setup_nitroa0_vnic(bp);
11151 if (rc)
11152 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11153 rc);
11154 }
11155
11156 if (BNXT_VF(bp)) {
11157 bnxt_hwrm_func_qcfg(bp);
11158 netdev_update_features(bp->dev);
11159 }
11160
11161 return 0;
11162
11163 err_out:
11164 bnxt_hwrm_resource_free(bp, 0, true);
11165
11166 return rc;
11167 }
11168
11169 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11170 {
11171 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11172 return 0;
11173 }
11174
11175 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11176 {
11177 bnxt_init_cp_rings(bp);
11178 bnxt_init_rx_rings(bp);
11179 bnxt_init_tx_rings(bp);
11180 bnxt_init_ring_grps(bp, irq_re_init);
11181 bnxt_init_vnics(bp);
11182
11183 return bnxt_init_chip(bp, irq_re_init);
11184 }
11185
11186 static int bnxt_set_real_num_queues(struct bnxt *bp)
11187 {
11188 int rc;
11189 struct net_device *dev = bp->dev;
11190
11191 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11192 bp->tx_nr_rings_xdp);
11193 if (rc)
11194 return rc;
11195
11196 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11197 if (rc)
11198 return rc;
11199
11200 #ifdef CONFIG_RFS_ACCEL
11201 if (bp->flags & BNXT_FLAG_RFS)
11202 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11203 #endif
11204
11205 return rc;
11206 }
11207
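/* Fit the requested RX and TX ring counts into @max completion rings.  With
 * shared completion rings each RX/TX pair uses one entry, so both counts are
 * simply clamped to @max; otherwise the larger of the two is reduced until
 * rx + tx fits.
 */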
11208 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11209 bool shared)
11210 {
11211 int _rx = *rx, _tx = *tx;
11212
11213 if (shared) {
11214 *rx = min_t(int, _rx, max);
11215 *tx = min_t(int, _tx, max);
11216 } else {
11217 if (max < 2)
11218 return -ENOMEM;
11219
11220 while (_rx + _tx > max) {
11221 if (_rx > _tx && _rx > 1)
11222 _rx--;
11223 else if (_tx > 1)
11224 _tx--;
11225 }
11226 *rx = _rx;
11227 *tx = _tx;
11228 }
11229 return 0;
11230 }
11231
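/* Convert a TX ring count to the number of completion rings it needs: the
 * TX rings for the same ring index across @tx_sets traffic classes share one
 * completion ring, while each XDP TX ring gets its own.
 */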
11232 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11233 {
11234 return (tx - tx_xdp) / tx_sets + tx_xdp;
11235 }
11236
11237 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11238 {
11239 int tcs = bp->num_tc;
11240
11241 if (!tcs)
11242 tcs = 1;
11243 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11244 }
11245
11246 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11247 {
11248 int tcs = bp->num_tc;
11249
11250 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11251 bp->tx_nr_rings_xdp;
11252 }
11253
11254 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11255 bool sh)
11256 {
11257 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11258
11259 if (tx_cp != *tx) {
11260 int tx_saved = tx_cp, rc;
11261
11262 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11263 if (rc)
11264 return rc;
11265 if (tx_cp != tx_saved)
11266 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11267 return 0;
11268 }
11269 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11270 }
11271
11272 static void bnxt_setup_msix(struct bnxt *bp)
11273 {
11274 const int len = sizeof(bp->irq_tbl[0].name);
11275 struct net_device *dev = bp->dev;
11276 int tcs, i;
11277
11278 tcs = bp->num_tc;
11279 if (tcs) {
11280 int i, off, count;
11281
11282 for (i = 0; i < tcs; i++) {
11283 count = bp->tx_nr_rings_per_tc;
11284 off = BNXT_TC_TO_RING_BASE(bp, i);
11285 netdev_set_tc_queue(dev, i, count, off);
11286 }
11287 }
11288
11289 for (i = 0; i < bp->cp_nr_rings; i++) {
11290 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11291 char *attr;
11292
11293 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11294 attr = "TxRx";
11295 else if (i < bp->rx_nr_rings)
11296 attr = "rx";
11297 else
11298 attr = "tx";
11299
11300 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11301 attr, i);
11302 bp->irq_tbl[map_idx].handler = bnxt_msix;
11303 }
11304 }
11305
11306 static int bnxt_init_int_mode(struct bnxt *bp);
11307
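/* Grow or shrink the number of dynamically allocated MSI-X vectors to
 * @total, adding or freeing entries at the end of the IRQ table.  Returns
 * the number of vectors actually available afterwards.
 */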
11308 static int bnxt_change_msix(struct bnxt *bp, int total)
11309 {
11310 struct msi_map map;
11311 int i;
11312
11313 /* add MSIX to the end if needed */
11314 for (i = bp->total_irqs; i < total; i++) {
11315 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11316 if (map.index < 0)
11317 return bp->total_irqs;
11318 bp->irq_tbl[i].vector = map.virq;
11319 bp->total_irqs++;
11320 }
11321
11322 /* trim MSIX from the end if needed */
11323 for (i = bp->total_irqs; i > total; i--) {
11324 map.index = i - 1;
11325 map.virq = bp->irq_tbl[i - 1].vector;
11326 pci_msix_free_irq(bp->pdev, map);
11327 bp->total_irqs--;
11328 }
11329 return bp->total_irqs;
11330 }
11331
11332 static int bnxt_setup_int_mode(struct bnxt *bp)
11333 {
11334 int rc;
11335
11336 if (!bp->irq_tbl) {
11337 rc = bnxt_init_int_mode(bp);
11338 if (rc || !bp->irq_tbl)
11339 return rc ?: -ENODEV;
11340 }
11341
11342 bnxt_setup_msix(bp);
11343
11344 rc = bnxt_set_real_num_queues(bp);
11345 return rc;
11346 }
11347
11348 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11349 {
11350 return bp->hw_resc.max_rsscos_ctxs;
11351 }
11352
11353 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11354 {
11355 return bp->hw_resc.max_vnics;
11356 }
11357
11358 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11359 {
11360 return bp->hw_resc.max_stat_ctxs;
11361 }
11362
11363 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11364 {
11365 return bp->hw_resc.max_cp_rings;
11366 }
11367
11368 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11369 {
11370 unsigned int cp = bp->hw_resc.max_cp_rings;
11371
11372 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11373 cp -= bnxt_get_ulp_msix_num(bp);
11374
11375 return cp;
11376 }
11377
11378 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11379 {
11380 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11381
11382 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11383 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11384
11385 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11386 }
11387
11388 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11389 {
11390 bp->hw_resc.max_irqs = max_irqs;
11391 }
11392
11393 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11394 {
11395 unsigned int cp;
11396
11397 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11398 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11399 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11400 else
11401 return cp - bp->cp_nr_rings;
11402 }
11403
11404 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11405 {
11406 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11407 }
11408
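/* Return how many of the @num additional MSI-X vectors requested (for ULP
 * use) can still be granted on top of the rings already in use without
 * exceeding the function's IRQ limit.
 */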
11409 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11410 {
11411 int max_irq = bnxt_get_max_func_irqs(bp);
11412 int total_req = bp->cp_nr_rings + num;
11413
11414 if (max_irq < total_req) {
11415 num = max_irq - bp->cp_nr_rings;
11416 if (num <= 0)
11417 return 0;
11418 }
11419 return num;
11420 }
11421
11422 static int bnxt_get_num_msix(struct bnxt *bp)
11423 {
11424 if (!BNXT_NEW_RM(bp))
11425 return bnxt_get_max_func_irqs(bp);
11426
11427 return bnxt_nq_rings_in_use(bp);
11428 }
11429
11430 static int bnxt_init_int_mode(struct bnxt *bp)
11431 {
11432 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11433
11434 total_vecs = bnxt_get_num_msix(bp);
11435 max = bnxt_get_max_func_irqs(bp);
11436 if (total_vecs > max)
11437 total_vecs = max;
11438
11439 if (!total_vecs)
11440 return 0;
11441
11442 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11443 min = 2;
11444
11445 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11446 PCI_IRQ_MSIX);
11447 ulp_msix = bnxt_get_ulp_msix_num(bp);
11448 if (total_vecs < 0 || total_vecs < ulp_msix) {
11449 rc = -ENODEV;
11450 goto msix_setup_exit;
11451 }
11452
11453 tbl_size = total_vecs;
11454 if (pci_msix_can_alloc_dyn(bp->pdev))
11455 tbl_size = max;
11456 bp->irq_tbl = kzalloc_objs(*bp->irq_tbl, tbl_size);
11457 if (bp->irq_tbl) {
11458 for (i = 0; i < total_vecs; i++)
11459 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11460
11461 bp->total_irqs = total_vecs;
11462 /* Trim rings based upon num of vectors allocated */
11463 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11464 total_vecs - ulp_msix, min == 1);
11465 if (rc)
11466 goto msix_setup_exit;
11467
11468 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11469 bp->cp_nr_rings = (min == 1) ?
11470 max_t(int, tx_cp, bp->rx_nr_rings) :
11471 tx_cp + bp->rx_nr_rings;
11472
11473 } else {
11474 rc = -ENOMEM;
11475 goto msix_setup_exit;
11476 }
11477 return 0;
11478
11479 msix_setup_exit:
11480 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11481 kfree(bp->irq_tbl);
11482 bp->irq_tbl = NULL;
11483 pci_free_irq_vectors(bp->pdev);
11484 return rc;
11485 }
11486
11487 static void bnxt_clear_int_mode(struct bnxt *bp)
11488 {
11489 pci_free_irq_vectors(bp->pdev);
11490
11491 kfree(bp->irq_tbl);
11492 bp->irq_tbl = NULL;
11493 }
11494
11495 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11496 {
11497 bool irq_cleared = false;
11498 bool irq_change = false;
11499 int tcs = bp->num_tc;
11500 int irqs_required;
11501 int rc;
11502
11503 if (!bnxt_need_reserve_rings(bp))
11504 return 0;
11505
11506 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11507 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11508
11509 if (ulp_msix > bp->ulp_num_msix_want)
11510 ulp_msix = bp->ulp_num_msix_want;
11511 irqs_required = ulp_msix + bp->cp_nr_rings;
11512 } else {
11513 irqs_required = bnxt_get_num_msix(bp);
11514 }
11515
11516 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11517 irq_change = true;
11518 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11519 bnxt_ulp_irq_stop(bp);
11520 bnxt_clear_int_mode(bp);
11521 irq_cleared = true;
11522 }
11523 }
11524 rc = __bnxt_reserve_rings(bp);
11525 if (irq_cleared) {
11526 if (!rc)
11527 rc = bnxt_init_int_mode(bp);
11528 bnxt_ulp_irq_restart(bp, rc);
11529 } else if (irq_change && !rc) {
11530 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11531 rc = -ENOSPC;
11532 }
11533 if (rc) {
11534 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11535 return rc;
11536 }
11537 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11538 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11539 netdev_err(bp->dev, "tx ring reservation failure\n");
11540 netdev_reset_tc(bp->dev);
11541 bp->num_tc = 0;
11542 if (bp->tx_nr_rings_xdp)
11543 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11544 else
11545 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11546 return -ENOMEM;
11547 }
11548 return 0;
11549 }
11550
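/* Quiesce the TX rings belonging to NAPI @idx for a queue restart.  Non-XDP
 * queues are stopped at the netdev level; in TPH mode the TX and completion
 * rings are also freed in firmware, presumably so they can be re-created
 * with updated steering tags when the queue is started again.
 */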
11551 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11552 {
11553 struct bnxt_tx_ring_info *txr;
11554 struct netdev_queue *txq;
11555 struct bnxt_napi *bnapi;
11556 int i;
11557
11558 bnapi = bp->bnapi[idx];
11559 bnxt_for_each_napi_tx(i, bnapi, txr) {
11560 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11561 synchronize_net();
11562
11563 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11564 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11565 if (txq) {
11566 __netif_tx_lock_bh(txq);
11567 netif_tx_stop_queue(txq);
11568 __netif_tx_unlock_bh(txq);
11569 }
11570 }
11571
11572 if (!bp->tph_mode)
11573 continue;
11574
11575 bnxt_hwrm_tx_ring_free(bp, txr, true);
11576 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11577 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11578 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11579 }
11580 }
11581
11582 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11583 {
11584 struct bnxt_tx_ring_info *txr;
11585 struct netdev_queue *txq;
11586 struct bnxt_napi *bnapi;
11587 int rc, i;
11588
11589 bnapi = bp->bnapi[idx];
11590 /* All rings have been reserved and previously allocated.
11591 * Reallocating with the same parameters should never fail.
11592 */
11593 bnxt_for_each_napi_tx(i, bnapi, txr) {
11594 if (!bp->tph_mode)
11595 goto start_tx;
11596
11597 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11598 if (rc)
11599 return rc;
11600
11601 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11602 if (rc)
11603 return rc;
11604
11605 txr->tx_prod = 0;
11606 txr->tx_cons = 0;
11607 txr->tx_hw_cons = 0;
11608 start_tx:
11609 WRITE_ONCE(txr->dev_state, 0);
11610 synchronize_net();
11611
11612 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11613 continue;
11614
11615 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11616 if (txq)
11617 netif_tx_start_queue(txq);
11618 }
11619
11620 return 0;
11621 }
11622
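/* IRQ affinity notifier used when TPH (TLP Processing Hints) is active: on an
 * affinity change, look up the steering tag for the new CPU, program it into
 * the MSI-X ST table entry, and restart the RX queue so the rings are
 * re-created with the new tag.
 */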
11623 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11624 const cpumask_t *mask)
11625 {
11626 struct bnxt_irq *irq;
11627 u16 tag;
11628 int err;
11629
11630 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11631
11632 if (!irq->bp->tph_mode)
11633 return;
11634
11635 cpumask_copy(irq->cpu_mask, mask);
11636
11637 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11638 return;
11639
11640 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11641 cpumask_first(irq->cpu_mask), &tag))
11642 return;
11643
11644 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11645 return;
11646
11647 netdev_lock(irq->bp->dev);
11648 if (netif_running(irq->bp->dev)) {
11649 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11650 if (err)
11651 netdev_err(irq->bp->dev,
11652 "RX queue restart failed: err=%d\n", err);
11653 }
11654 netdev_unlock(irq->bp->dev);
11655 }
11656
11657 static void bnxt_irq_affinity_release(struct kref *ref)
11658 {
11659 struct irq_affinity_notify *notify =
11660 container_of(ref, struct irq_affinity_notify, kref);
11661 struct bnxt_irq *irq;
11662
11663 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11664
11665 if (!irq->bp->tph_mode)
11666 return;
11667
11668 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11669 netdev_err(irq->bp->dev,
11670 "Setting ST=0 for MSIX entry %d failed\n",
11671 irq->msix_nr);
11672 return;
11673 }
11674 }
11675
11676 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11677 {
11678 irq_set_affinity_notifier(irq->vector, NULL);
11679 }
11680
11681 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11682 {
11683 struct irq_affinity_notify *notify;
11684
11685 irq->bp = bp;
11686
11687 /* Nothing to do if TPH is not enabled */
11688 if (!bp->tph_mode)
11689 return;
11690
11691 /* Register IRQ affinity notifier */
11692 notify = &irq->affinity_notify;
11693 notify->irq = irq->vector;
11694 notify->notify = bnxt_irq_affinity_notify;
11695 notify->release = bnxt_irq_affinity_release;
11696
11697 irq_set_affinity_notifier(irq->vector, notify);
11698 }
11699
11700 static void bnxt_free_irq(struct bnxt *bp)
11701 {
11702 struct bnxt_irq *irq;
11703 int i;
11704
11705 #ifdef CONFIG_RFS_ACCEL
11706 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11707 bp->dev->rx_cpu_rmap = NULL;
11708 #endif
11709 if (!bp->irq_tbl || !bp->bnapi)
11710 return;
11711
11712 for (i = 0; i < bp->cp_nr_rings; i++) {
11713 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11714
11715 irq = &bp->irq_tbl[map_idx];
11716 if (irq->requested) {
11717 if (irq->have_cpumask) {
11718 irq_update_affinity_hint(irq->vector, NULL);
11719 free_cpumask_var(irq->cpu_mask);
11720 irq->have_cpumask = 0;
11721 }
11722
11723 bnxt_release_irq_notifier(irq);
11724
11725 free_irq(irq->vector, bp->bnapi[i]);
11726 }
11727
11728 irq->requested = 0;
11729 }
11730
11731 /* Disable TPH support */
11732 pcie_disable_tph(bp->pdev);
11733 bp->tph_mode = 0;
11734 }
11735
11736 static int bnxt_request_irq(struct bnxt *bp)
11737 {
11738 struct cpu_rmap *rmap = NULL;
11739 int i, j, rc = 0;
11740 unsigned long flags = 0;
11741
11742 rc = bnxt_setup_int_mode(bp);
11743 if (rc) {
11744 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11745 rc);
11746 return rc;
11747 }
11748 #ifdef CONFIG_RFS_ACCEL
11749 rmap = bp->dev->rx_cpu_rmap;
11750 #endif
11751
11752 /* Enable TPH support as part of IRQ request */
11753 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11754 if (!rc)
11755 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11756
11757 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11758 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11759 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11760
11761 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11762 rmap && bp->bnapi[i]->rx_ring) {
11763 rc = irq_cpu_rmap_add(rmap, irq->vector);
11764 if (rc)
11765 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11766 j);
11767 j++;
11768 }
11769
11770 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11771 bp->bnapi[i]);
11772 if (rc)
11773 break;
11774
11775 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11776 irq->requested = 1;
11777
11778 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11779 int numa_node = dev_to_node(&bp->pdev->dev);
11780 u16 tag;
11781
11782 irq->have_cpumask = 1;
11783 irq->msix_nr = map_idx;
11784 irq->ring_nr = i;
11785 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11786 irq->cpu_mask);
11787 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11788 if (rc) {
11789 netdev_warn(bp->dev,
11790 "Update affinity hint failed, IRQ = %d\n",
11791 irq->vector);
11792 break;
11793 }
11794
11795 bnxt_register_irq_notifier(bp, irq);
11796
11797 /* Init ST table entry */
11798 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11799 cpumask_first(irq->cpu_mask),
11800 &tag))
11801 continue;
11802
11803 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11804 }
11805 }
11806 return rc;
11807 }
11808
11809 static void bnxt_del_napi(struct bnxt *bp)
11810 {
11811 int i;
11812
11813 if (!bp->bnapi)
11814 return;
11815
11816 for (i = 0; i < bp->rx_nr_rings; i++)
11817 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11818 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11819 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11820
11821 for (i = 0; i < bp->cp_nr_rings; i++) {
11822 struct bnxt_napi *bnapi = bp->bnapi[i];
11823
11824 __netif_napi_del_locked(&bnapi->napi);
11825 }
11826 /* We called __netif_napi_del_locked(), so we must wait for an RCU
11827  * grace period before freeing the napi structures.
11828  */
11829 synchronize_net();
11830 }
11831
11832 static void bnxt_init_napi(struct bnxt *bp)
11833 {
11834 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11835 unsigned int cp_nr_rings = bp->cp_nr_rings;
11836 struct bnxt_napi *bnapi;
11837 int i;
11838
11839 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11840 poll_fn = bnxt_poll_p5;
11841 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11842 cp_nr_rings--;
11843
11844 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11845
11846 for (i = 0; i < cp_nr_rings; i++) {
11847 bnapi = bp->bnapi[i];
11848 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11849 bnapi->index);
11850 }
11851 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11852 bnapi = bp->bnapi[cp_nr_rings];
11853 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11854 }
11855 }
11856
11857 static void bnxt_disable_napi(struct bnxt *bp)
11858 {
11859 int i;
11860
11861 if (!bp->bnapi ||
11862 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11863 return;
11864
11865 for (i = 0; i < bp->cp_nr_rings; i++) {
11866 struct bnxt_napi *bnapi = bp->bnapi[i];
11867 struct bnxt_cp_ring_info *cpr;
11868
11869 cpr = &bnapi->cp_ring;
11870 if (bnapi->tx_fault)
11871 cpr->sw_stats->tx.tx_resets++;
11872 if (bnapi->in_reset)
11873 cpr->sw_stats->rx.rx_resets++;
11874 napi_disable_locked(&bnapi->napi);
11875 }
11876 }
11877
11878 static void bnxt_enable_napi(struct bnxt *bp)
11879 {
11880 int i;
11881
11882 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11883 for (i = 0; i < bp->cp_nr_rings; i++) {
11884 struct bnxt_napi *bnapi = bp->bnapi[i];
11885 struct bnxt_cp_ring_info *cpr;
11886
11887 bnapi->tx_fault = 0;
11888
11889 cpr = &bnapi->cp_ring;
11890 bnapi->in_reset = false;
11891
11892 if (bnapi->rx_ring) {
11893 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11894 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11895 }
11896 napi_enable_locked(&bnapi->napi);
11897 }
11898 }
11899
11900 void bnxt_tx_disable(struct bnxt *bp)
11901 {
11902 int i;
11903 struct bnxt_tx_ring_info *txr;
11904
11905 if (bp->tx_ring) {
11906 for (i = 0; i < bp->tx_nr_rings; i++) {
11907 txr = &bp->tx_ring[i];
11908 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11909 }
11910 }
11911 /* Make sure napi polls see @dev_state change */
11912 synchronize_net();
11913 /* Drop carrier first to prevent TX timeout */
11914 netif_carrier_off(bp->dev);
11915 /* Stop all TX queues */
11916 netif_tx_disable(bp->dev);
11917 }
11918
11919 void bnxt_tx_enable(struct bnxt *bp)
11920 {
11921 int i;
11922 struct bnxt_tx_ring_info *txr;
11923
11924 for (i = 0; i < bp->tx_nr_rings; i++) {
11925 txr = &bp->tx_ring[i];
11926 WRITE_ONCE(txr->dev_state, 0);
11927 }
11928 /* Make sure napi polls see @dev_state change */
11929 synchronize_net();
11930 netif_tx_wake_all_queues(bp->dev);
11931 if (BNXT_LINK_IS_UP(bp))
11932 netif_carrier_on(bp->dev);
11933 }
11934
11935 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11936 {
11937 u8 active_fec = link_info->active_fec_sig_mode &
11938 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11939
11940 switch (active_fec) {
11941 default:
11942 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11943 return "None";
11944 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11945 return "Clause 74 BaseR";
11946 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11947 return "Clause 91 RS(528,514)";
11948 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11949 return "Clause 91 RS544_1XN";
11950 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11951 return "Clause 91 RS(544,514)";
11952 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11953 return "Clause 91 RS272_1XN";
11954 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11955 return "Clause 91 RS(272,257)";
11956 }
11957 }
11958
11959 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
11960 {
11961 u8 reason = link_info->link_down_reason;
11962
11963 /* Multiple bits can be set; we report only one bit, in order of
11964  * priority.
11965  */
11966 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
11967 return "(Remote fault)";
11968 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
11969 return "(OTP Speed limit violation)";
11970 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
11971 return "(Cable removed)";
11972 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
11973 return "(Module fault)";
11974 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
11975 return "(BMC request down)";
11976 return "";
11977 }
11978
11979 void bnxt_report_link(struct bnxt *bp)
11980 {
11981 if (BNXT_LINK_IS_UP(bp)) {
11982 const char *signal = "";
11983 const char *flow_ctrl;
11984 const char *duplex;
11985 u32 speed;
11986 u16 fec;
11987
11988 netif_carrier_on(bp->dev);
11989 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11990 if (speed == SPEED_UNKNOWN) {
11991 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11992 return;
11993 }
11994 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11995 duplex = "full";
11996 else
11997 duplex = "half";
11998 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11999 flow_ctrl = "ON - receive & transmit";
12000 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
12001 flow_ctrl = "ON - transmit";
12002 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
12003 flow_ctrl = "ON - receive";
12004 else
12005 flow_ctrl = "none";
12006 if (bp->link_info.phy_qcfg_resp.option_flags &
12007 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
12008 u8 sig_mode = bp->link_info.active_fec_sig_mode &
12009 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12010 switch (sig_mode) {
12011 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12012 signal = "(NRZ) ";
12013 break;
12014 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12015 signal = "(PAM4 56Gbps) ";
12016 break;
12017 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12018 signal = "(PAM4 112Gbps) ";
12019 break;
12020 default:
12021 break;
12022 }
12023 }
12024 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12025 speed, signal, duplex, flow_ctrl);
12026 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12027 netdev_info(bp->dev, "EEE is %s\n",
12028 bp->eee.eee_active ? "active" :
12029 "not active");
12030 fec = bp->link_info.fec_cfg;
12031 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12032 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12033 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12034 bnxt_report_fec(&bp->link_info));
12035 } else {
12036 char *str = bnxt_link_down_reason(&bp->link_info);
12037
12038 netif_carrier_off(bp->dev);
12039 netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12040 }
12041 }
12042
12043 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12044 {
12045 if (!resp->supported_speeds_auto_mode &&
12046 !resp->supported_speeds_force_mode &&
12047 !resp->supported_pam4_speeds_auto_mode &&
12048 !resp->supported_pam4_speeds_force_mode &&
12049 !resp->supported_speeds2_auto_mode &&
12050 !resp->supported_speeds2_force_mode)
12051 return true;
12052 return false;
12053 }
12054
12055 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12056 {
12057 struct bnxt_link_info *link_info = &bp->link_info;
12058 struct hwrm_port_phy_qcaps_output *resp;
12059 struct hwrm_port_phy_qcaps_input *req;
12060 int rc = 0;
12061
12062 if (bp->hwrm_spec_code < 0x10201)
12063 return 0;
12064
12065 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12066 if (rc)
12067 return rc;
12068
12069 resp = hwrm_req_hold(bp, req);
12070 rc = hwrm_req_send(bp, req);
12071 if (rc)
12072 goto hwrm_phy_qcaps_exit;
12073
12074 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12075 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12076 struct ethtool_keee *eee = &bp->eee;
12077 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12078
12079 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12080 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12081 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12082 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12083 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12084 }
12085
12086 if (bp->hwrm_spec_code >= 0x10a01) {
12087 if (bnxt_phy_qcaps_no_speed(resp)) {
12088 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12089 netdev_warn(bp->dev, "Ethernet link disabled\n");
12090 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12091 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12092 netdev_info(bp->dev, "Ethernet link enabled\n");
12093 /* Phy re-enabled, reprobe the speeds */
12094 link_info->support_auto_speeds = 0;
12095 link_info->support_pam4_auto_speeds = 0;
12096 link_info->support_auto_speeds2 = 0;
12097 }
12098 }
12099 if (resp->supported_speeds_auto_mode)
12100 link_info->support_auto_speeds =
12101 le16_to_cpu(resp->supported_speeds_auto_mode);
12102 if (resp->supported_pam4_speeds_auto_mode)
12103 link_info->support_pam4_auto_speeds =
12104 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12105 if (resp->supported_speeds2_auto_mode)
12106 link_info->support_auto_speeds2 =
12107 le16_to_cpu(resp->supported_speeds2_auto_mode);
12108
12109 bp->port_count = resp->port_cnt;
12110
12111 hwrm_phy_qcaps_exit:
12112 hwrm_req_drop(bp, req);
12113 return rc;
12114 }
12115
12116 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12117 {
12118 struct hwrm_port_mac_qcaps_output *resp;
12119 struct hwrm_port_mac_qcaps_input *req;
12120 int rc;
12121
12122 if (bp->hwrm_spec_code < 0x10a03)
12123 return;
12124
12125 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12126 if (rc)
12127 return;
12128
12129 resp = hwrm_req_hold(bp, req);
12130 rc = hwrm_req_send_silent(bp, req);
12131 if (!rc)
12132 bp->mac_flags = resp->flags;
12133 hwrm_req_drop(bp, req);
12134 }
12135
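/* Return true if @advertising contains any speed bits that are no longer
 * present in @supported.
 */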
12136 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12137 {
12138 u16 diff = advertising ^ supported;
12139
12140 return ((supported | diff) != supported);
12141 }
12142
12143 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12144 {
12145 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12146
12147 /* Check if any advertised speeds are no longer supported. The caller
12148 * holds the link_lock mutex, so we can modify link_info settings.
12149 */
12150 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12151 if (bnxt_support_dropped(link_info->advertising,
12152 link_info->support_auto_speeds2)) {
12153 link_info->advertising = link_info->support_auto_speeds2;
12154 return true;
12155 }
12156 return false;
12157 }
12158 if (bnxt_support_dropped(link_info->advertising,
12159 link_info->support_auto_speeds)) {
12160 link_info->advertising = link_info->support_auto_speeds;
12161 return true;
12162 }
12163 if (bnxt_support_dropped(link_info->advertising_pam4,
12164 link_info->support_pam4_auto_speeds)) {
12165 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12166 return true;
12167 }
12168 return false;
12169 }
12170
12171 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12172 {
12173 struct bnxt_link_info *link_info = &bp->link_info;
12174 struct hwrm_port_phy_qcfg_output *resp;
12175 struct hwrm_port_phy_qcfg_input *req;
12176 u8 link_state = link_info->link_state;
12177 bool support_changed;
12178 int rc;
12179
12180 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12181 if (rc)
12182 return rc;
12183
12184 resp = hwrm_req_hold(bp, req);
12185 rc = hwrm_req_send(bp, req);
12186 if (rc) {
12187 hwrm_req_drop(bp, req);
12188 if (BNXT_VF(bp) && rc == -ENODEV) {
12189 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12190 rc = 0;
12191 }
12192 return rc;
12193 }
12194
12195 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12196 link_info->phy_link_status = resp->link;
12197 link_info->duplex = resp->duplex_cfg;
12198 if (bp->hwrm_spec_code >= 0x10800)
12199 link_info->duplex = resp->duplex_state;
12200 link_info->pause = resp->pause;
12201 link_info->auto_mode = resp->auto_mode;
12202 link_info->auto_pause_setting = resp->auto_pause;
12203 link_info->lp_pause = resp->link_partner_adv_pause;
12204 link_info->force_pause_setting = resp->force_pause;
12205 link_info->duplex_setting = resp->duplex_cfg;
12206 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12207 link_info->link_speed = le16_to_cpu(resp->link_speed);
12208 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12209 link_info->active_lanes = resp->active_lanes;
12210 } else {
12211 link_info->link_speed = 0;
12212 link_info->active_lanes = 0;
12213 }
12214 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12215 link_info->force_pam4_link_speed =
12216 le16_to_cpu(resp->force_pam4_link_speed);
12217 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12218 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12219 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12220 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12221 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12222 link_info->auto_pam4_link_speeds =
12223 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12224 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12225 link_info->lp_auto_link_speeds =
12226 le16_to_cpu(resp->link_partner_adv_speeds);
12227 link_info->lp_auto_pam4_link_speeds =
12228 resp->link_partner_pam4_adv_speeds;
12229 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12230 link_info->phy_ver[0] = resp->phy_maj;
12231 link_info->phy_ver[1] = resp->phy_min;
12232 link_info->phy_ver[2] = resp->phy_bld;
12233 link_info->media_type = resp->media_type;
12234 link_info->phy_type = resp->phy_type;
12235 link_info->transceiver = resp->xcvr_pkg_type;
12236 link_info->phy_addr = resp->eee_config_phy_addr &
12237 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12238 link_info->module_status = resp->module_status;
12239 link_info->link_down_reason = resp->link_down_reason;
12240
12241 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12242 struct ethtool_keee *eee = &bp->eee;
12243 u16 fw_speeds;
12244
12245 eee->eee_active = 0;
12246 if (resp->eee_config_phy_addr &
12247 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12248 eee->eee_active = 1;
12249 fw_speeds = le16_to_cpu(
12250 resp->link_partner_adv_eee_link_speed_mask);
12251 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12252 }
12253
12254 /* Pull initial EEE config */
12255 if (!chng_link_state) {
12256 if (resp->eee_config_phy_addr &
12257 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12258 eee->eee_enabled = 1;
12259
12260 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12261 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12262
12263 if (resp->eee_config_phy_addr &
12264 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12265 __le32 tmr;
12266
12267 eee->tx_lpi_enabled = 1;
12268 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12269 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12270 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12271 }
12272 }
12273 }
12274
12275 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12276 if (bp->hwrm_spec_code >= 0x10504) {
12277 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12278 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12279 }
12280 /* TODO: need to add more logic to report VF link */
12281 if (chng_link_state) {
12282 if (link_info->phy_link_status == BNXT_LINK_LINK)
12283 link_info->link_state = BNXT_LINK_STATE_UP;
12284 else
12285 link_info->link_state = BNXT_LINK_STATE_DOWN;
12286 if (link_state != link_info->link_state)
12287 bnxt_report_link(bp);
12288 } else {
12289 /* always report link down if not required to update the link state */
12290 link_info->link_state = BNXT_LINK_STATE_DOWN;
12291 }
12292 hwrm_req_drop(bp, req);
12293
12294 if (!BNXT_PHY_CFG_ABLE(bp))
12295 return 0;
12296
12297 support_changed = bnxt_support_speed_dropped(link_info);
12298 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12299 bnxt_hwrm_set_link_setting(bp, true, false);
12300 return 0;
12301 }
12302
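/* Refresh the link state from firmware and check the SFP+ module status,
 * logging warnings when an unqualified module is detected or when the
 * module has TX disabled or has been powered down.
 */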
12303 static void bnxt_get_port_module_status(struct bnxt *bp)
12304 {
12305 struct bnxt_link_info *link_info = &bp->link_info;
12306 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12307 u8 module_status;
12308
12309 if (bnxt_update_link(bp, true))
12310 return;
12311
12312 module_status = link_info->module_status;
12313 switch (module_status) {
12314 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12315 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12316 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12317 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12318 bp->pf.port_id);
12319 if (bp->hwrm_spec_code >= 0x10201) {
12320 netdev_warn(bp->dev, "Module part number %s\n",
12321 resp->phy_vendor_partnumber);
12322 }
12323 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12324 netdev_warn(bp->dev, "TX is disabled\n");
12325 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12326 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12327 }
12328 }
12329
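/* Fill in the pause fields of a PORT_PHY_CFG request.  When flow control
 * is autonegotiated, advertise the requested RX/TX pause settings;
 * otherwise force them (and mirror them into auto_pause on newer
 * firmware specs).
 */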
12330 static void
12331 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12332 {
12333 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12334 if (bp->hwrm_spec_code >= 0x10201)
12335 req->auto_pause =
12336 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12337 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12338 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12339 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12340 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12341 req->enables |=
12342 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12343 } else {
12344 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12345 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12346 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12347 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12348 req->enables |=
12349 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12350 if (bp->hwrm_spec_code >= 0x10201) {
12351 req->auto_pause = req->force_pause;
12352 req->enables |= cpu_to_le32(
12353 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12354 }
12355 }
12356 }
12357
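/* Fill in the link speed fields of a PORT_PHY_CFG request: advertise the
 * configured speed masks when autoneg is enabled, otherwise force the
 * requested speed, then ask firmware to apply the new settings
 * immediately.
 */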
12358 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12359 {
12360 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12361 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12362 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12363 req->enables |=
12364 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12365 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12366 } else if (bp->link_info.advertising) {
12367 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12368 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12369 }
12370 if (bp->link_info.advertising_pam4) {
12371 req->enables |=
12372 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12373 req->auto_link_pam4_speed_mask =
12374 cpu_to_le16(bp->link_info.advertising_pam4);
12375 }
12376 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12377 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12378 } else {
12379 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12380 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12381 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12382 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12383 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12384 (u32)bp->link_info.req_link_speed);
12385 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12386 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12387 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12388 } else {
12389 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12390 }
12391 }
12392
12393 /* tell chimp that the setting takes effect immediately */
12394 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12395 }
12396
12397 int bnxt_hwrm_set_pause(struct bnxt *bp)
12398 {
12399 struct hwrm_port_phy_cfg_input *req;
12400 int rc;
12401
12402 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12403 if (rc)
12404 return rc;
12405
12406 bnxt_hwrm_set_pause_common(bp, req);
12407
12408 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12409 bp->link_info.force_link_chng)
12410 bnxt_hwrm_set_link_common(bp, req);
12411
12412 rc = hwrm_req_send(bp, req);
12413 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12414 /* since changing the pause setting doesn't trigger any link
12415 * change event, the driver needs to update the current pause
12416 * result upon successful return of the phy_cfg command
12417 */
12418 bp->link_info.pause =
12419 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12420 bp->link_info.auto_pause_setting = 0;
12421 if (!bp->link_info.force_link_chng)
12422 bnxt_report_link(bp);
12423 }
12424 bp->link_info.force_link_chng = false;
12425 return rc;
12426 }
12427
12428 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12429 struct hwrm_port_phy_cfg_input *req)
12430 {
12431 struct ethtool_keee *eee = &bp->eee;
12432
12433 if (eee->eee_enabled) {
12434 u16 eee_speeds;
12435 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12436
12437 if (eee->tx_lpi_enabled)
12438 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12439 else
12440 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12441
12442 req->flags |= cpu_to_le32(flags);
12443 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12444 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12445 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12446 } else {
12447 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12448 }
12449 }
12450
12451 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12452 {
12453 struct hwrm_port_phy_cfg_input *req;
12454 int rc;
12455
12456 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12457 if (rc)
12458 return rc;
12459
12460 if (set_pause)
12461 bnxt_hwrm_set_pause_common(bp, req);
12462
12463 bnxt_hwrm_set_link_common(bp, req);
12464
12465 if (set_eee)
12466 bnxt_hwrm_set_eee(bp, req);
12467 return hwrm_req_send(bp, req);
12468 }
12469
12470 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12471 {
12472 struct hwrm_port_phy_cfg_input *req;
12473 int rc;
12474
12475 if (!BNXT_SINGLE_PF(bp))
12476 return 0;
12477
12478 if (pci_num_vf(bp->pdev) &&
12479 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12480 return 0;
12481
12482 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12483 if (rc)
12484 return rc;
12485
12486 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12487 rc = hwrm_req_send(bp, req);
12488 if (!rc) {
12489 mutex_lock(&bp->link_lock);
12490 /* Device is not obliged to bring the link down in certain
12491 * scenarios, even when forced. Setting the state unknown is
12492 * consistent with driver startup and will force the link state to
12493 * be reported during subsequent open based on PORT_PHY_QCFG.
12494 */
12495 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12496 mutex_unlock(&bp->link_lock);
12497 }
12498 return rc;
12499 }
12500
12501 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12502 {
12503 #ifdef CONFIG_TEE_BNXT_FW
12504 int rc = tee_bnxt_fw_load();
12505
12506 if (rc)
12507 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12508
12509 return rc;
12510 #else
12511 netdev_err(bp->dev, "OP-TEE not supported\n");
12512 return -ENODEV;
12513 #endif
12514 }
12515
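/* Poll the firmware health register until firmware is no longer booting
 * or recovering.  If firmware crashed without a master function, attempt
 * a reset through OP-TEE; otherwise return an error if firmware is still
 * not healthy.
 */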
12516 static int bnxt_try_recover_fw(struct bnxt *bp)
12517 {
12518 if (bp->fw_health && bp->fw_health->status_reliable) {
12519 int retry = 0, rc;
12520 u32 sts;
12521
12522 do {
12523 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12524 rc = bnxt_hwrm_poll(bp);
12525 if (!BNXT_FW_IS_BOOTING(sts) &&
12526 !BNXT_FW_IS_RECOVERING(sts))
12527 break;
12528 retry++;
12529 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12530
12531 if (!BNXT_FW_IS_HEALTHY(sts)) {
12532 netdev_err(bp->dev,
12533 "Firmware not responding, status: 0x%x\n",
12534 sts);
12535 rc = -ENODEV;
12536 }
12537 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12538 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12539 return bnxt_fw_reset_via_optee(bp);
12540 }
12541 return rc;
12542 }
12543
12544 return -ENODEV;
12545 }
12546
12547 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12548 {
12549 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12550
12551 if (!BNXT_NEW_RM(bp))
12552 return; /* no resource reservations required */
12553
12554 hw_resc->resv_cp_rings = 0;
12555 hw_resc->resv_stat_ctxs = 0;
12556 hw_resc->resv_irqs = 0;
12557 hw_resc->resv_tx_rings = 0;
12558 hw_resc->resv_rx_rings = 0;
12559 hw_resc->resv_hw_ring_grps = 0;
12560 hw_resc->resv_vnics = 0;
12561 hw_resc->resv_rsscos_ctxs = 0;
12562 if (!fw_reset) {
12563 bp->tx_nr_rings = 0;
12564 bp->rx_nr_rings = 0;
12565 }
12566 }
12567
12568 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12569 {
12570 int rc;
12571
12572 if (!BNXT_NEW_RM(bp))
12573 return 0; /* no resource reservations required */
12574
12575 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12576 if (rc)
12577 netdev_err(bp->dev, "resc_qcaps failed\n");
12578
12579 bnxt_clear_reservations(bp, fw_reset);
12580
12581 return rc;
12582 }
12583
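/* Notify firmware that the driver interface is going up or down using
 * FUNC_DRV_IF_CHANGE.  On the up path, react to resource or capability
 * changes and to a completed firmware reset reported in the response by
 * reinitializing firmware state and cancelling stale resource
 * reservations.
 */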
12584 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12585 {
12586 struct hwrm_func_drv_if_change_output *resp;
12587 struct hwrm_func_drv_if_change_input *req;
12588 bool resc_reinit = false;
12589 bool caps_change = false;
12590 int rc, retry = 0;
12591 bool fw_reset;
12592 u32 flags = 0;
12593
12594 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12595 bp->fw_reset_state = 0;
12596
12597 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12598 return 0;
12599
12600 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12601 if (rc)
12602 return rc;
12603
12604 if (up)
12605 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12606 resp = hwrm_req_hold(bp, req);
12607
12608 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12609 while (retry < BNXT_FW_IF_RETRY) {
12610 rc = hwrm_req_send(bp, req);
12611 if (rc != -EAGAIN)
12612 break;
12613
12614 msleep(50);
12615 retry++;
12616 }
12617
12618 if (rc == -EAGAIN) {
12619 hwrm_req_drop(bp, req);
12620 return rc;
12621 } else if (!rc) {
12622 flags = le32_to_cpu(resp->flags);
12623 } else if (up) {
12624 rc = bnxt_try_recover_fw(bp);
12625 fw_reset = true;
12626 }
12627 hwrm_req_drop(bp, req);
12628 if (rc)
12629 return rc;
12630
12631 if (!up) {
12632 bnxt_inv_fw_health_reg(bp);
12633 return 0;
12634 }
12635
12636 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12637 resc_reinit = true;
12638 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12639 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12640 fw_reset = true;
12641 else
12642 bnxt_remap_fw_health_regs(bp);
12643
12644 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12645 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12646 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12647 return -ENODEV;
12648 }
12649 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12650 caps_change = true;
12651
12652 if (resc_reinit || fw_reset || caps_change) {
12653 if (fw_reset || caps_change) {
12654 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12655 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12656 bnxt_ulp_irq_stop(bp);
12657 bnxt_free_ctx_mem(bp, false);
12658 bnxt_dcb_free(bp);
12659 rc = bnxt_fw_init_one(bp);
12660 if (rc) {
12661 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12662 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12663 return rc;
12664 }
12665 /* IRQ will be initialized later in bnxt_request_irq() */
12666 bnxt_clear_int_mode(bp);
12667 }
12668 rc = bnxt_cancel_reservations(bp, fw_reset);
12669 }
12670 return rc;
12671 }
12672
12673 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12674 {
12675 struct hwrm_port_led_qcaps_output *resp;
12676 struct hwrm_port_led_qcaps_input *req;
12677 struct bnxt_pf_info *pf = &bp->pf;
12678 int rc;
12679
12680 bp->num_leds = 0;
12681 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12682 return 0;
12683
12684 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12685 if (rc)
12686 return rc;
12687
12688 req->port_id = cpu_to_le16(pf->port_id);
12689 resp = hwrm_req_hold(bp, req);
12690 rc = hwrm_req_send(bp, req);
12691 if (rc) {
12692 hwrm_req_drop(bp, req);
12693 return rc;
12694 }
12695 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12696 int i;
12697
12698 bp->num_leds = resp->num_leds;
12699 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12700 bp->num_leds);
12701 for (i = 0; i < bp->num_leds; i++) {
12702 struct bnxt_led_info *led = &bp->leds[i];
12703 __le16 caps = led->led_state_caps;
12704
12705 if (!led->led_group_id ||
12706 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12707 bp->num_leds = 0;
12708 break;
12709 }
12710 }
12711 }
12712 hwrm_req_drop(bp, req);
12713 return 0;
12714 }
12715
12716 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12717 {
12718 struct hwrm_wol_filter_alloc_output *resp;
12719 struct hwrm_wol_filter_alloc_input *req;
12720 int rc;
12721
12722 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12723 if (rc)
12724 return rc;
12725
12726 req->port_id = cpu_to_le16(bp->pf.port_id);
12727 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12728 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12729 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12730
12731 resp = hwrm_req_hold(bp, req);
12732 rc = hwrm_req_send(bp, req);
12733 if (!rc)
12734 bp->wol_filter_id = resp->wol_filter_id;
12735 hwrm_req_drop(bp, req);
12736 return rc;
12737 }
12738
12739 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12740 {
12741 struct hwrm_wol_filter_free_input *req;
12742 int rc;
12743
12744 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12745 if (rc)
12746 return rc;
12747
12748 req->port_id = cpu_to_le16(bp->pf.port_id);
12749 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12750 req->wol_filter_id = bp->wol_filter_id;
12751
12752 return hwrm_req_send(bp, req);
12753 }
12754
12755 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12756 {
12757 struct hwrm_wol_filter_qcfg_output *resp;
12758 struct hwrm_wol_filter_qcfg_input *req;
12759 u16 next_handle = 0;
12760 int rc;
12761
12762 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12763 if (rc)
12764 return rc;
12765
12766 req->port_id = cpu_to_le16(bp->pf.port_id);
12767 req->handle = cpu_to_le16(handle);
12768 resp = hwrm_req_hold(bp, req);
12769 rc = hwrm_req_send(bp, req);
12770 if (!rc) {
12771 next_handle = le16_to_cpu(resp->next_handle);
12772 if (next_handle != 0) {
12773 if (resp->wol_type ==
12774 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12775 bp->wol = 1;
12776 bp->wol_filter_id = resp->wol_filter_id;
12777 }
12778 }
12779 }
12780 hwrm_req_drop(bp, req);
12781 return next_handle;
12782 }
12783
12784 static void bnxt_get_wol_settings(struct bnxt *bp)
12785 {
12786 u16 handle = 0;
12787
12788 bp->wol = 0;
12789 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12790 return;
12791
12792 do {
12793 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12794 } while (handle && handle != 0xffff);
12795 }
12796
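/* Validate the current EEE configuration against the autoneg state and
 * the advertised speeds, correcting the EEE settings as needed.  Returns
 * false if the EEE parameters must be reprogrammed in firmware.
 */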
12797 static bool bnxt_eee_config_ok(struct bnxt *bp)
12798 {
12799 struct ethtool_keee *eee = &bp->eee;
12800 struct bnxt_link_info *link_info = &bp->link_info;
12801
12802 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12803 return true;
12804
12805 if (eee->eee_enabled) {
12806 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12807 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12808
12809 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12810
12811 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12812 eee->eee_enabled = 0;
12813 return false;
12814 }
12815 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12816 linkmode_and(eee->advertised, advertising,
12817 eee->supported);
12818 return false;
12819 }
12820 }
12821 return true;
12822 }
12823
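/* Compare the requested pause, speed and EEE settings against what the
 * PHY currently reports and reprogram the link through HWRM if anything
 * differs or if the last close left the link down.
 */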
12824 static int bnxt_update_phy_setting(struct bnxt *bp)
12825 {
12826 int rc;
12827 bool update_link = false;
12828 bool update_pause = false;
12829 bool update_eee = false;
12830 struct bnxt_link_info *link_info = &bp->link_info;
12831
12832 rc = bnxt_update_link(bp, true);
12833 if (rc) {
12834 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12835 rc);
12836 return rc;
12837 }
12838 if (!BNXT_SINGLE_PF(bp))
12839 return 0;
12840
12841 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12842 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12843 link_info->req_flow_ctrl)
12844 update_pause = true;
12845 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12846 link_info->force_pause_setting != link_info->req_flow_ctrl)
12847 update_pause = true;
12848 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12849 if (BNXT_AUTO_MODE(link_info->auto_mode))
12850 update_link = true;
12851 if (bnxt_force_speed_updated(link_info))
12852 update_link = true;
12853 if (link_info->req_duplex != link_info->duplex_setting)
12854 update_link = true;
12855 } else {
12856 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12857 update_link = true;
12858 if (bnxt_auto_speed_updated(link_info))
12859 update_link = true;
12860 }
12861
12862 /* The last close may have shut down the link, so we need to call
12863 * PHY_CFG to bring it back up.
12864 */
12865 if (!BNXT_LINK_IS_UP(bp))
12866 update_link = true;
12867
12868 if (!bnxt_eee_config_ok(bp))
12869 update_eee = true;
12870
12871 if (update_link)
12872 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12873 else if (update_pause)
12874 rc = bnxt_hwrm_set_pause(bp);
12875 if (rc) {
12876 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12877 rc);
12878 return rc;
12879 }
12880
12881 return rc;
12882 }
12883
12884 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12885
12886 static int bnxt_reinit_after_abort(struct bnxt *bp)
12887 {
12888 int rc;
12889
12890 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12891 return -EBUSY;
12892
12893 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12894 return -ENODEV;
12895
12896 rc = bnxt_fw_init_one(bp);
12897 if (!rc) {
12898 bnxt_clear_int_mode(bp);
12899 rc = bnxt_init_int_mode(bp);
12900 if (!rc) {
12901 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12902 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12903 }
12904 }
12905 return rc;
12906 }
12907
12908 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12909 {
12910 struct bnxt_ntuple_filter *ntp_fltr;
12911 struct bnxt_l2_filter *l2_fltr;
12912
12913 if (list_empty(&fltr->list))
12914 return;
12915
12916 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12917 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12918 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12919 atomic_inc(&l2_fltr->refcnt);
12920 ntp_fltr->l2_fltr = l2_fltr;
12921 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12922 bnxt_del_ntp_filter(bp, ntp_fltr);
12923 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12924 fltr->sw_id);
12925 }
12926 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12927 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12928 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12929 bnxt_del_l2_filter(bp, l2_fltr);
12930 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12931 fltr->sw_id);
12932 }
12933 }
12934 }
12935
12936 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12937 {
12938 struct bnxt_filter_base *usr_fltr, *tmp;
12939
12940 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12941 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12942 }
12943
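/* Spread the TX queues across CPUs close to the device's NUMA node and
 * program the resulting masks as the netdev XPS mapping.
 */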
12944 static int bnxt_set_xps_mapping(struct bnxt *bp)
12945 {
12946 int numa_node = dev_to_node(&bp->pdev->dev);
12947 unsigned int q_idx, map_idx, cpu, i;
12948 const struct cpumask *cpu_mask_ptr;
12949 int nr_cpus = num_online_cpus();
12950 cpumask_t *q_map;
12951 int rc = 0;
12952
12953 q_map = kzalloc_objs(*q_map, bp->tx_nr_rings_per_tc);
12954 if (!q_map)
12955 return -ENOMEM;
12956
12957 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12958 * Each TC has the same number of TX queues. The nth TX queue for each
12959 * TC will have the same CPU mask.
12960 */
12961 for (i = 0; i < nr_cpus; i++) {
12962 map_idx = i % bp->tx_nr_rings_per_tc;
12963 cpu = cpumask_local_spread(i, numa_node);
12964 cpu_mask_ptr = get_cpu_mask(cpu);
12965 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12966 }
12967
12968 /* Register CPU mask for each TX queue except the ones marked for XDP */
12969 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12970 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12971 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12972 if (rc) {
12973 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12974 q_idx);
12975 break;
12976 }
12977 }
12978
12979 kfree(q_map);
12980
12981 return rc;
12982 }
12983
12984 static int bnxt_tx_nr_rings(struct bnxt *bp)
12985 {
12986 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12987 bp->tx_nr_rings_per_tc;
12988 }
12989
12990 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12991 {
12992 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12993 }
12994
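/* Core NIC open path: reserve rings, allocate ring memory, set up NAPI
 * and IRQs (when irq_re_init), initialize the hardware, update the PHY
 * settings (when link_re_init), and finally enable interrupts and the
 * TX queues.
 */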
12995 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12996 {
12997 int rc = 0;
12998
12999 netif_carrier_off(bp->dev);
13000 if (irq_re_init) {
13001 /* Reserve rings now if none were reserved at driver probe. */
13002 rc = bnxt_init_dflt_ring_mode(bp);
13003 if (rc) {
13004 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
13005 return rc;
13006 }
13007 }
13008 rc = bnxt_reserve_rings(bp, irq_re_init);
13009 if (rc)
13010 return rc;
13011
13012 /* Make adjustments if reserved TX rings are fewer than requested */
13013 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13014 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13015 if (bp->tx_nr_rings_xdp) {
13016 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13017 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13018 }
13019 rc = bnxt_alloc_mem(bp, irq_re_init);
13020 if (rc) {
13021 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13022 goto open_err_free_mem;
13023 }
13024
13025 if (irq_re_init) {
13026 bnxt_init_napi(bp);
13027 rc = bnxt_request_irq(bp);
13028 if (rc) {
13029 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13030 goto open_err_irq;
13031 }
13032 }
13033
13034 rc = bnxt_init_nic(bp, irq_re_init);
13035 if (rc) {
13036 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13037 goto open_err_irq;
13038 }
13039
13040 bnxt_enable_napi(bp);
13041 bnxt_debug_dev_init(bp);
13042
13043 if (link_re_init) {
13044 mutex_lock(&bp->link_lock);
13045 rc = bnxt_update_phy_setting(bp);
13046 mutex_unlock(&bp->link_lock);
13047 if (rc) {
13048 netdev_warn(bp->dev, "failed to update phy settings\n");
13049 if (BNXT_SINGLE_PF(bp)) {
13050 bp->link_info.phy_retry = true;
13051 bp->link_info.phy_retry_expires =
13052 jiffies + 5 * HZ;
13053 }
13054 }
13055 }
13056
13057 if (irq_re_init) {
13058 udp_tunnel_nic_reset_ntf(bp->dev);
13059 rc = bnxt_set_xps_mapping(bp);
13060 if (rc)
13061 netdev_warn(bp->dev, "failed to set xps mapping\n");
13062 }
13063
13064 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13065 if (!static_key_enabled(&bnxt_xdp_locking_key))
13066 static_branch_enable(&bnxt_xdp_locking_key);
13067 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13068 static_branch_disable(&bnxt_xdp_locking_key);
13069 }
13070 set_bit(BNXT_STATE_OPEN, &bp->state);
13071 bnxt_enable_int(bp);
13072 /* Enable TX queues */
13073 bnxt_tx_enable(bp);
13074 mod_timer(&bp->timer, jiffies + bp->current_interval);
13075 /* Poll link status and check for SFP+ module status */
13076 mutex_lock(&bp->link_lock);
13077 bnxt_get_port_module_status(bp);
13078 mutex_unlock(&bp->link_lock);
13079
13080 /* VF-reps may need to be re-opened after the PF is re-opened */
13081 if (BNXT_PF(bp))
13082 bnxt_vf_reps_open(bp);
13083 bnxt_ptp_init_rtc(bp, true);
13084 bnxt_ptp_cfg_tstamp_filters(bp);
13085 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13086 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13087 bnxt_cfg_usr_fltrs(bp);
13088 return 0;
13089
13090 open_err_irq:
13091 bnxt_del_napi(bp);
13092
13093 open_err_free_mem:
13094 bnxt_free_skbs(bp);
13095 bnxt_free_irq(bp);
13096 bnxt_free_mem(bp, true);
13097 return rc;
13098 }
13099
13100 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13101 {
13102 int rc = 0;
13103
13104 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13105 rc = -EIO;
13106 if (!rc)
13107 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13108 if (rc) {
13109 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13110 netif_close(bp->dev);
13111 }
13112 return rc;
13113 }
13114
13115 /* netdev instance lock held, open the NIC halfway by allocating all
13116 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13117 * for offline self tests.
13118 */
13119 int bnxt_half_open_nic(struct bnxt *bp)
13120 {
13121 int rc = 0;
13122
13123 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13124 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13125 rc = -ENODEV;
13126 goto half_open_err;
13127 }
13128
13129 rc = bnxt_alloc_mem(bp, true);
13130 if (rc) {
13131 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13132 goto half_open_err;
13133 }
13134 bnxt_init_napi(bp);
13135 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13136 rc = bnxt_init_nic(bp, true);
13137 if (rc) {
13138 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13139 bnxt_del_napi(bp);
13140 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13141 goto half_open_err;
13142 }
13143 return 0;
13144
13145 half_open_err:
13146 bnxt_free_skbs(bp);
13147 bnxt_free_mem(bp, true);
13148 netif_close(bp->dev);
13149 return rc;
13150 }
13151
13152 /* netdev instance lock held, this call can only be made after a previous
13153 * successful call to bnxt_half_open_nic().
13154 */
13155 void bnxt_half_close_nic(struct bnxt *bp)
13156 {
13157 bnxt_hwrm_resource_free(bp, false, true);
13158 bnxt_del_napi(bp);
13159 bnxt_free_skbs(bp);
13160 bnxt_free_mem(bp, true);
13161 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13162 }
13163
13164 void bnxt_reenable_sriov(struct bnxt *bp)
13165 {
13166 if (BNXT_PF(bp)) {
13167 struct bnxt_pf_info *pf = &bp->pf;
13168 int n = pf->active_vfs;
13169
13170 if (n)
13171 bnxt_cfg_hw_sriov(bp, &n, true);
13172 }
13173 }
13174
13175 static int bnxt_open(struct net_device *dev)
13176 {
13177 struct bnxt *bp = netdev_priv(dev);
13178 int rc;
13179
13180 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13181 rc = bnxt_reinit_after_abort(bp);
13182 if (rc) {
13183 if (rc == -EBUSY)
13184 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13185 else
13186 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13187 return -ENODEV;
13188 }
13189 }
13190
13191 rc = bnxt_hwrm_if_change(bp, true);
13192 if (rc)
13193 return rc;
13194
13195 rc = __bnxt_open_nic(bp, true, true);
13196 if (rc) {
13197 bnxt_hwrm_if_change(bp, false);
13198 } else {
13199 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13200 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13201 bnxt_queue_sp_work(bp,
13202 BNXT_RESTART_ULP_SP_EVENT);
13203 }
13204 }
13205
13206 return rc;
13207 }
13208
13209 static bool bnxt_drv_busy(struct bnxt *bp)
13210 {
13211 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13212 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13213 }
13214
13215 static void bnxt_get_ring_stats(struct bnxt *bp,
13216 struct rtnl_link_stats64 *stats);
13217
13218 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13219 bool link_re_init)
13220 {
13221 /* Close the VF-reps before closing PF */
13222 if (BNXT_PF(bp))
13223 bnxt_vf_reps_close(bp);
13224
13225 /* Change device state to avoid TX queue wake up's */
13226 bnxt_tx_disable(bp);
13227
13228 clear_bit(BNXT_STATE_OPEN, &bp->state);
13229 smp_mb__after_atomic();
13230 while (bnxt_drv_busy(bp))
13231 msleep(20);
13232
13233 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13234 bnxt_clear_rss_ctxs(bp);
13235 /* Flush rings and disable interrupts */
13236 bnxt_shutdown_nic(bp, irq_re_init);
13237
13238 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13239
13240 bnxt_debug_dev_exit(bp);
13241 bnxt_disable_napi(bp);
13242 timer_delete_sync(&bp->timer);
13243 bnxt_free_skbs(bp);
13244
13245 /* Save ring stats before shutdown */
13246 if (bp->bnapi && irq_re_init) {
13247 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13248 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13249 }
13250 if (irq_re_init) {
13251 bnxt_free_irq(bp);
13252 bnxt_del_napi(bp);
13253 }
13254 bnxt_free_mem(bp, irq_re_init);
13255 }
13256
13257 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13258 {
13259 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13260 /* If we get here, it means firmware reset is in progress
13261 * while we are trying to close. We can safely proceed with
13262 * the close because we are holding netdev instance lock.
13263 * Some firmware messages may fail as we proceed to close.
13264 * We set the ABORT_ERR flag here so that the FW reset thread
13265 * will later abort when it gets the netdev instance lock
13266 * and sees the flag.
13267 */
13268 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13269 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13270 }
13271
13272 #ifdef CONFIG_BNXT_SRIOV
13273 if (bp->sriov_cfg) {
13274 int rc;
13275
13276 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13277 !bp->sriov_cfg,
13278 BNXT_SRIOV_CFG_WAIT_TMO);
13279 if (!rc)
13280 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13281 else if (rc < 0)
13282 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13283 }
13284 #endif
13285 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13286 }
13287
13288 static int bnxt_close(struct net_device *dev)
13289 {
13290 struct bnxt *bp = netdev_priv(dev);
13291
13292 bnxt_close_nic(bp, true, true);
13293 bnxt_hwrm_shutdown_link(bp);
13294 bnxt_hwrm_if_change(bp, false);
13295 return 0;
13296 }
13297
13298 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13299 u16 *val)
13300 {
13301 struct hwrm_port_phy_mdio_read_output *resp;
13302 struct hwrm_port_phy_mdio_read_input *req;
13303 int rc;
13304
13305 if (bp->hwrm_spec_code < 0x10a00)
13306 return -EOPNOTSUPP;
13307
13308 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13309 if (rc)
13310 return rc;
13311
13312 req->port_id = cpu_to_le16(bp->pf.port_id);
13313 req->phy_addr = phy_addr;
13314 req->reg_addr = cpu_to_le16(reg & 0x1f);
13315 if (mdio_phy_id_is_c45(phy_addr)) {
13316 req->cl45_mdio = 1;
13317 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13318 req->dev_addr = mdio_phy_id_devad(phy_addr);
13319 req->reg_addr = cpu_to_le16(reg);
13320 }
13321
13322 resp = hwrm_req_hold(bp, req);
13323 rc = hwrm_req_send(bp, req);
13324 if (!rc)
13325 *val = le16_to_cpu(resp->reg_data);
13326 hwrm_req_drop(bp, req);
13327 return rc;
13328 }
13329
13330 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13331 u16 val)
13332 {
13333 struct hwrm_port_phy_mdio_write_input *req;
13334 int rc;
13335
13336 if (bp->hwrm_spec_code < 0x10a00)
13337 return -EOPNOTSUPP;
13338
13339 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13340 if (rc)
13341 return rc;
13342
13343 req->port_id = cpu_to_le16(bp->pf.port_id);
13344 req->phy_addr = phy_addr;
13345 req->reg_addr = cpu_to_le16(reg & 0x1f);
13346 if (mdio_phy_id_is_c45(phy_addr)) {
13347 req->cl45_mdio = 1;
13348 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13349 req->dev_addr = mdio_phy_id_devad(phy_addr);
13350 req->reg_addr = cpu_to_le16(reg);
13351 }
13352 req->reg_data = cpu_to_le16(val);
13353
13354 return hwrm_req_send(bp, req);
13355 }
13356
13357 /* netdev instance lock held */
13358 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13359 {
13360 struct mii_ioctl_data *mdio = if_mii(ifr);
13361 struct bnxt *bp = netdev_priv(dev);
13362 int rc;
13363
13364 switch (cmd) {
13365 case SIOCGMIIPHY:
13366 mdio->phy_id = bp->link_info.phy_addr;
13367
13368 fallthrough;
13369 case SIOCGMIIREG: {
13370 u16 mii_regval = 0;
13371
13372 if (!netif_running(dev))
13373 return -EAGAIN;
13374
13375 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13376 &mii_regval);
13377 mdio->val_out = mii_regval;
13378 return rc;
13379 }
13380
13381 case SIOCSMIIREG:
13382 if (!netif_running(dev))
13383 return -EAGAIN;
13384
13385 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13386 mdio->val_in);
13387
13388 default:
13389 /* do nothing */
13390 break;
13391 }
13392 return -EOPNOTSUPP;
13393 }
13394
13395 static void bnxt_get_ring_stats(struct bnxt *bp,
13396 struct rtnl_link_stats64 *stats)
13397 {
13398 int i;
13399
13400 for (i = 0; i < bp->cp_nr_rings; i++) {
13401 struct bnxt_napi *bnapi = bp->bnapi[i];
13402 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13403 u64 *sw = cpr->stats.sw_stats;
13404
13405 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13406 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13407 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13408
13409 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13410 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13411 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13412
13413 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13414 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13415 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13416
13417 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13418 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13419 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13420
13421 stats->rx_missed_errors +=
13422 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13423
13424 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13425
13426 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13427
13428 stats->rx_dropped +=
13429 cpr->sw_stats->rx.rx_netpoll_discards +
13430 cpr->sw_stats->rx.rx_oom_discards;
13431 }
13432 }
13433
13434 static void bnxt_add_prev_stats(struct bnxt *bp,
13435 struct rtnl_link_stats64 *stats)
13436 {
13437 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13438
13439 stats->rx_packets += prev_stats->rx_packets;
13440 stats->tx_packets += prev_stats->tx_packets;
13441 stats->rx_bytes += prev_stats->rx_bytes;
13442 stats->tx_bytes += prev_stats->tx_bytes;
13443 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13444 stats->multicast += prev_stats->multicast;
13445 stats->rx_dropped += prev_stats->rx_dropped;
13446 stats->tx_dropped += prev_stats->tx_dropped;
13447 }
13448
13449 static void
13450 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13451 {
13452 struct bnxt *bp = netdev_priv(dev);
13453
13454 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13455 /* Make sure bnxt_close_nic() sees that we are reading stats before
13456 * we check the BNXT_STATE_OPEN flag.
13457 */
13458 smp_mb__after_atomic();
13459 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13460 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13461 *stats = bp->net_stats_prev;
13462 return;
13463 }
13464
13465 bnxt_get_ring_stats(bp, stats);
13466 bnxt_add_prev_stats(bp, stats);
13467
13468 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13469 u64 *rx = bp->port_stats.sw_stats;
13470 u64 *tx = bp->port_stats.sw_stats +
13471 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13472
13473 stats->rx_crc_errors =
13474 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13475 stats->rx_frame_errors =
13476 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13477 stats->rx_length_errors =
13478 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13479 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13480 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13481 stats->rx_errors =
13482 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13483 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13484 stats->collisions =
13485 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13486 stats->tx_fifo_errors =
13487 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13488 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13489 }
13490 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13491 }
13492
13493 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13494 struct bnxt_total_ring_err_stats *stats,
13495 struct bnxt_cp_ring_info *cpr)
13496 {
13497 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13498 u64 *hw_stats = cpr->stats.sw_stats;
13499
13500 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13501 stats->rx_total_resets += sw_stats->rx.rx_resets;
13502 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13503 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13504 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13505 stats->rx_total_ring_discards +=
13506 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13507 stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13508 stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13509 stats->tx_total_resets += sw_stats->tx.tx_resets;
13510 stats->tx_total_ring_discards +=
13511 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13512 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13513 }
13514
13515 void bnxt_get_ring_err_stats(struct bnxt *bp,
13516 struct bnxt_total_ring_err_stats *stats)
13517 {
13518 int i;
13519
13520 for (i = 0; i < bp->cp_nr_rings; i++)
13521 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13522 }
13523
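/* Copy the netdev multicast list into the default VNIC and update the RX
 * mask bits.  Returns true if the cached list changed; if the list
 * exceeds BNXT_MAX_MC_ADDRS, switch to ALL_MCAST mode instead.
 */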
13524 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13525 {
13526 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13527 struct net_device *dev = bp->dev;
13528 struct netdev_hw_addr *ha;
13529 u8 *haddr;
13530 int mc_count = 0;
13531 bool update = false;
13532 int off = 0;
13533
13534 netdev_for_each_mc_addr(ha, dev) {
13535 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13536 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13537 vnic->mc_list_count = 0;
13538 return false;
13539 }
13540 haddr = ha->addr;
13541 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13542 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13543 update = true;
13544 }
13545 off += ETH_ALEN;
13546 mc_count++;
13547 }
13548 if (mc_count)
13549 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13550
13551 if (mc_count != vnic->mc_list_count) {
13552 vnic->mc_list_count = mc_count;
13553 update = true;
13554 }
13555 return update;
13556 }
13557
13558 static bool bnxt_uc_list_updated(struct bnxt *bp)
13559 {
13560 struct net_device *dev = bp->dev;
13561 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13562 struct netdev_hw_addr *ha;
13563 int off = 0;
13564
13565 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13566 return true;
13567
13568 netdev_for_each_uc_addr(ha, dev) {
13569 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13570 return true;
13571
13572 off += ETH_ALEN;
13573 }
13574 return false;
13575 }
13576
13577 static void bnxt_set_rx_mode(struct net_device *dev)
13578 {
13579 struct bnxt *bp = netdev_priv(dev);
13580 struct bnxt_vnic_info *vnic;
13581 bool mc_update = false;
13582 bool uc_update;
13583 u32 mask;
13584
13585 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13586 return;
13587
13588 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13589 mask = vnic->rx_mask;
13590 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13591 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13592 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13593 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13594
13595 if (dev->flags & IFF_PROMISC)
13596 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13597
13598 uc_update = bnxt_uc_list_updated(bp);
13599
13600 if (dev->flags & IFF_BROADCAST)
13601 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13602 if (dev->flags & IFF_ALLMULTI) {
13603 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13604 vnic->mc_list_count = 0;
13605 } else if (dev->flags & IFF_MULTICAST) {
13606 mc_update = bnxt_mc_list_updated(bp, &mask);
13607 }
13608
13609 if (mask != vnic->rx_mask || uc_update || mc_update) {
13610 vnic->rx_mask = mask;
13611
13612 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13613 }
13614 }
13615
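/* Reprogram the unicast L2 filters and RX mask for the default VNIC.
 * Falls back to promiscuous mode when unicast filters are exhausted and
 * to ALL_MCAST mode if the multicast filter update fails.
 */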
13616 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13617 {
13618 struct net_device *dev = bp->dev;
13619 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13620 struct netdev_hw_addr *ha;
13621 int i, off = 0, rc;
13622 bool uc_update;
13623
13624 netif_addr_lock_bh(dev);
13625 uc_update = bnxt_uc_list_updated(bp);
13626 netif_addr_unlock_bh(dev);
13627
13628 if (!uc_update)
13629 goto skip_uc;
13630
13631 for (i = 1; i < vnic->uc_filter_count; i++) {
13632 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13633
13634 bnxt_hwrm_l2_filter_free(bp, fltr);
13635 bnxt_del_l2_filter(bp, fltr);
13636 }
13637
13638 vnic->uc_filter_count = 1;
13639
13640 netif_addr_lock_bh(dev);
13641 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13642 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13643 } else {
13644 netdev_for_each_uc_addr(ha, dev) {
13645 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13646 off += ETH_ALEN;
13647 vnic->uc_filter_count++;
13648 }
13649 }
13650 netif_addr_unlock_bh(dev);
13651
13652 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13653 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13654 if (rc) {
13655 if (BNXT_VF(bp) && rc == -ENODEV) {
13656 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13657 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13658 else
13659 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13660 rc = 0;
13661 } else {
13662 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13663 }
13664 vnic->uc_filter_count = i;
13665 return rc;
13666 }
13667 }
13668 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13669 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13670
13671 skip_uc:
13672 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13673 !bnxt_promisc_ok(bp))
13674 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13675 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13676 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13677 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13678 rc);
13679 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13680 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13681 vnic->mc_list_count = 0;
13682 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13683 }
13684 if (rc)
13685 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13686 rc);
13687
13688 return rc;
13689 }
13690
13691 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13692 {
13693 #ifdef CONFIG_BNXT_SRIOV
13694 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13695 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13696
13697 /* No minimum rings were provisioned by the PF. Don't
13698 * reserve rings by default when device is down.
13699 */
13700 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13701 return true;
13702
13703 if (!netif_running(bp->dev))
13704 return false;
13705 }
13706 #endif
13707 return true;
13708 }
13709
13710 /* If the chip and firmware support RFS */
13711 static bool bnxt_rfs_supported(struct bnxt *bp)
13712 {
13713 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13714 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13715 return true;
13716 return false;
13717 }
13718 /* 212 firmware is broken for aRFS */
13719 if (BNXT_FW_MAJ(bp) == 212)
13720 return false;
13721 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13722 return true;
13723 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13724 return true;
13725 return false;
13726 }
13727
13728 /* If runtime conditions support RFS */
13729 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13730 {
13731 struct bnxt_hw_rings hwr = {0};
13732 int max_vnics, max_rss_ctxs;
13733
13734 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13735 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13736 return bnxt_rfs_supported(bp);
13737
13738 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13739 return false;
13740
13741 hwr.grp = bp->rx_nr_rings;
13742 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13743 if (new_rss_ctx)
13744 hwr.vnic++;
13745 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13746 max_vnics = bnxt_get_max_func_vnics(bp);
13747 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13748
13749 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13750 if (bp->rx_nr_rings > 1)
13751 netdev_warn(bp->dev,
13752 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13753 min(max_rss_ctxs - 1, max_vnics - 1));
13754 return false;
13755 }
13756
13757 if (!BNXT_NEW_RM(bp))
13758 return true;
13759
13760 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13761 * issue that will mess up the default VNIC if we reduce the
13762 * reservations.
13763 */
13764 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13765 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13766 return true;
13767
13768 bnxt_hwrm_reserve_rings(bp, &hwr);
13769 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13770 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13771 return true;
13772
13773 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13774 hwr.vnic = 1;
13775 hwr.rss_ctx = 0;
13776 bnxt_hwrm_reserve_rings(bp, &hwr);
13777 return false;
13778 }
13779
13780 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13781 netdev_features_t features)
13782 {
13783 struct bnxt *bp = netdev_priv(dev);
13784 netdev_features_t vlan_features;
13785
13786 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13787 features &= ~NETIF_F_NTUPLE;
13788
13789 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13790 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13791
13792 if (!(features & NETIF_F_GRO))
13793 features &= ~NETIF_F_GRO_HW;
13794
13795 if (features & NETIF_F_GRO_HW)
13796 features &= ~NETIF_F_LRO;
13797
13798 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13799 * turned on or off together.
13800 */
13801 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13802 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13803 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13804 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13805 else if (vlan_features)
13806 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13807 }
13808 #ifdef CONFIG_BNXT_SRIOV
13809 if (BNXT_VF(bp) && bp->vf.vlan)
13810 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13811 #endif
13812 return features;
13813 }
13814
13815 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13816 bool link_re_init, u32 flags, bool update_tpa)
13817 {
13818 bnxt_close_nic(bp, irq_re_init, link_re_init);
13819 bp->flags = flags;
13820 if (update_tpa)
13821 bnxt_set_ring_params(bp);
13822 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13823 }
13824
13825 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13826 {
13827 bool update_tpa = false, update_ntuple = false;
13828 struct bnxt *bp = netdev_priv(dev);
13829 u32 flags = bp->flags;
13830 u32 changes;
13831 int rc = 0;
13832 bool re_init = false;
13833
13834 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13835 if (features & NETIF_F_GRO_HW)
13836 flags |= BNXT_FLAG_GRO;
13837 else if (features & NETIF_F_LRO)
13838 flags |= BNXT_FLAG_LRO;
13839
13840 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13841 flags &= ~BNXT_FLAG_TPA;
13842
13843 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13844 flags |= BNXT_FLAG_STRIP_VLAN;
13845
13846 if (features & NETIF_F_NTUPLE)
13847 flags |= BNXT_FLAG_RFS;
13848 else
13849 bnxt_clear_usr_fltrs(bp, true);
13850
13851 changes = flags ^ bp->flags;
13852 if (changes & BNXT_FLAG_TPA) {
13853 update_tpa = true;
13854 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13855 (flags & BNXT_FLAG_TPA) == 0 ||
13856 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13857 re_init = true;
13858 }
13859
13860 if (changes & ~BNXT_FLAG_TPA)
13861 re_init = true;
13862
13863 if (changes & BNXT_FLAG_RFS)
13864 update_ntuple = true;
13865
13866 if (flags != bp->flags) {
13867 u32 old_flags = bp->flags;
13868
13869 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13870 bp->flags = flags;
13871 if (update_tpa)
13872 bnxt_set_ring_params(bp);
13873 return rc;
13874 }
13875
13876 if (update_ntuple)
13877 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13878
13879 if (re_init)
13880 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13881
13882 if (update_tpa) {
13883 bp->flags = flags;
13884 rc = bnxt_set_tpa(bp,
13885 (flags & BNXT_FLAG_TPA) ?
13886 true : false);
13887 if (rc)
13888 bp->flags = old_flags;
13889 }
13890 }
13891 return rc;
13892 }
13893
13894 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13895 u8 **nextp)
13896 {
13897 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13898 int hdr_count = 0;
13899 u8 *nexthdr;
13900 int start;
13901
13902 /* Check that there are at most 2 IPv6 extension headers, no
13903 * fragment header, and each is <= 64 bytes.
13904 */
13905 start = nw_off + sizeof(*ip6h);
13906 nexthdr = &ip6h->nexthdr;
13907 while (ipv6_ext_hdr(*nexthdr)) {
13908 struct ipv6_opt_hdr *hp;
13909 int hdrlen;
13910
13911 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13912 *nexthdr == NEXTHDR_FRAGMENT)
13913 return false;
13914 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13915 skb_headlen(skb), NULL);
13916 if (!hp)
13917 return false;
13918 if (*nexthdr == NEXTHDR_AUTH)
13919 hdrlen = ipv6_authlen(hp);
13920 else
13921 hdrlen = ipv6_optlen(hp);
13922
13923 if (hdrlen > 64)
13924 return false;
13925
13926 hdr_count++;
13927 nexthdr = &hp->nexthdr;
13928 start += hdrlen;
13929 }
13930 if (nextp) {
13931 /* Caller will check inner protocol */
13932 if (skb->encapsulation) {
13933 *nextp = nexthdr;
13934 return true;
13935 }
13936 *nextp = NULL;
13937 }
13938 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13939 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13940 }
13941
13942 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13943 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13944 {
13945 struct udphdr *uh = udp_hdr(skb);
13946 __be16 udp_port = uh->dest;
13947
13948 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13949 udp_port != bp->vxlan_gpe_port)
13950 return false;
13951 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13952 struct ethhdr *eh = inner_eth_hdr(skb);
13953
13954 switch (eh->h_proto) {
13955 case htons(ETH_P_IP):
13956 return true;
13957 case htons(ETH_P_IPV6):
13958 return bnxt_exthdr_check(bp, skb,
13959 skb_inner_network_offset(skb),
13960 NULL);
13961 }
13962 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13963 return true;
13964 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13965 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13966 NULL);
13967 }
13968 return false;
13969 }
13970
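/* Helper for bnxt_features_check(): return true if the encapsulated
 * packet uses a tunnel type (configured UDP tunnel ports, IPIP, GRE or
 * IPv6-in-IP) whose inner headers the hardware can still offload.
 */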
13971 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13972 {
13973 switch (l4_proto) {
13974 case IPPROTO_UDP:
13975 return bnxt_udp_tunl_check(bp, skb);
13976 case IPPROTO_IPIP:
13977 return true;
13978 case IPPROTO_GRE: {
13979 switch (skb->inner_protocol) {
13980 default:
13981 return false;
13982 case htons(ETH_P_IP):
13983 return true;
13984 case htons(ETH_P_IPV6):
13985 fallthrough;
13986 }
13987 }
13988 case IPPROTO_IPV6:
13989 /* Check ext headers of inner ipv6 */
13990 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13991 NULL);
13992 }
13993 return false;
13994 }
13995
13996 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13997 struct net_device *dev,
13998 netdev_features_t features)
13999 {
14000 struct bnxt *bp = netdev_priv(dev);
14001 u8 *l4_proto;
14002
14003 features = vlan_features_check(skb, features);
14004 switch (vlan_get_protocol(skb)) {
14005 case htons(ETH_P_IP):
14006 if (!skb->encapsulation)
14007 return features;
14008 l4_proto = &ip_hdr(skb)->protocol;
14009 if (bnxt_tunl_check(bp, skb, *l4_proto))
14010 return features;
14011 break;
14012 case htons(ETH_P_IPV6):
14013 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14014 &l4_proto))
14015 break;
14016 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14017 return features;
14018 break;
14019 }
14020 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14021 }
14022
14023 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14024 u32 *reg_buf)
14025 {
14026 struct hwrm_dbg_read_direct_output *resp;
14027 struct hwrm_dbg_read_direct_input *req;
14028 __le32 *dbg_reg_buf;
14029 dma_addr_t mapping;
14030 int rc, i;
14031
14032 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14033 if (rc)
14034 return rc;
14035
14036 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14037 &mapping);
14038 if (!dbg_reg_buf) {
14039 rc = -ENOMEM;
14040 goto dbg_rd_reg_exit;
14041 }
14042
14043 req->host_dest_addr = cpu_to_le64(mapping);
14044
14045 resp = hwrm_req_hold(bp, req);
14046 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14047 req->read_len32 = cpu_to_le32(num_words);
14048
14049 rc = hwrm_req_send(bp, req);
14050 if (rc || resp->error_code) {
14051 rc = -EIO;
14052 goto dbg_rd_reg_exit;
14053 }
14054 for (i = 0; i < num_words; i++)
14055 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14056
14057 dbg_rd_reg_exit:
14058 hwrm_req_drop(bp, req);
14059 return rc;
14060 }
14061
14062 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14063 u32 ring_id, u32 *prod, u32 *cons)
14064 {
14065 struct hwrm_dbg_ring_info_get_output *resp;
14066 struct hwrm_dbg_ring_info_get_input *req;
14067 int rc;
14068
14069 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14070 if (rc)
14071 return rc;
14072
14073 req->ring_type = ring_type;
14074 req->fw_ring_id = cpu_to_le32(ring_id);
14075 resp = hwrm_req_hold(bp, req);
14076 rc = hwrm_req_send(bp, req);
14077 if (!rc) {
14078 *prod = le32_to_cpu(resp->producer_index);
14079 *cons = le32_to_cpu(resp->consumer_index);
14080 }
14081 hwrm_req_drop(bp, req);
14082 return rc;
14083 }
14084
14085 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14086 {
14087 struct bnxt_tx_ring_info *txr;
14088 int i = bnapi->index, j;
14089
14090 bnxt_for_each_napi_tx(j, bnapi, txr)
14091 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14092 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14093 txr->tx_cons);
14094 }
14095
14096 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14097 {
14098 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14099 int i = bnapi->index;
14100
14101 if (!rxr)
14102 return;
14103
14104 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14105 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14106 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14107 rxr->rx_sw_agg_prod);
14108 }
14109
14110 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14111 {
14112 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14113 int i = bnapi->index, j;
14114
14115 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14116 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14117 for (j = 0; j < cpr->cp_ring_count; j++) {
14118 cpr2 = &cpr->cp_ring_arr[j];
14119 if (!cpr2->bnapi)
14120 continue;
14121 netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14122 i, j, cpr2->cp_ring_struct.fw_ring_id,
14123 cpr2->cp_raw_cons);
14124 }
14125 }
14126
14127 static void bnxt_dbg_dump_states(struct bnxt *bp)
14128 {
14129 int i;
14130 struct bnxt_napi *bnapi;
14131
14132 for (i = 0; i < bp->cp_nr_rings; i++) {
14133 bnapi = bp->bnapi[i];
14134 if (netif_msg_drv(bp)) {
14135 bnxt_dump_tx_sw_state(bnapi);
14136 bnxt_dump_rx_sw_state(bnapi);
14137 bnxt_dump_cp_sw_state(bnapi);
14138 }
14139 }
14140 }
14141
14142 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14143 {
14144 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14145 struct hwrm_ring_reset_input *req;
14146 struct bnxt_napi *bnapi = rxr->bnapi;
14147 struct bnxt_cp_ring_info *cpr;
14148 u16 cp_ring_id;
14149 int rc;
14150
14151 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14152 if (rc)
14153 return rc;
14154
14155 cpr = &bnapi->cp_ring;
14156 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14157 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14158 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14159 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14160 return hwrm_req_send_silent(bp, req);
14161 }
14162
14163 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14164 {
14165 if (!silent)
14166 bnxt_dbg_dump_states(bp);
14167 if (netif_running(bp->dev)) {
14168 bnxt_close_nic(bp, !silent, false);
14169 bnxt_open_nic(bp, !silent, false);
14170 }
14171 }
14172
14173 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14174 {
14175 struct bnxt *bp = netdev_priv(dev);
14176
14177 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14178 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14179 }
14180
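/* Poll the firmware heartbeat and reset-count registers from the timer.  If
 * the heartbeat stalls or the reset count changes while the device is still
 * present on the bus, schedule a firmware exception reset.
 */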
14181 static void bnxt_fw_health_check(struct bnxt *bp)
14182 {
14183 struct bnxt_fw_health *fw_health = bp->fw_health;
14184 struct pci_dev *pdev = bp->pdev;
14185 u32 val;
14186
14187 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14188 return;
14189
14190 /* Make sure it is enabled before checking the tmr_counter. */
14191 smp_rmb();
14192 if (fw_health->tmr_counter) {
14193 fw_health->tmr_counter--;
14194 return;
14195 }
14196
14197 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14198 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14199 fw_health->arrests++;
14200 goto fw_reset;
14201 }
14202
14203 fw_health->last_fw_heartbeat = val;
14204
14205 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14206 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14207 fw_health->discoveries++;
14208 goto fw_reset;
14209 }
14210
14211 fw_health->tmr_counter = fw_health->tmr_multiplier;
14212 return;
14213
14214 fw_reset:
14215 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14216 }
14217
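/* Periodic per-device timer: drives firmware health checks, statistics and
 * flow-stats updates, PHY retry handling, and other deferred work while the
 * device is open.
 */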
14218 static void bnxt_timer(struct timer_list *t)
14219 {
14220 struct bnxt *bp = timer_container_of(bp, t, timer);
14221 struct net_device *dev = bp->dev;
14222
14223 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14224 return;
14225
14226 if (atomic_read(&bp->intr_sem) != 0)
14227 goto bnxt_restart_timer;
14228
14229 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14230 bnxt_fw_health_check(bp);
14231
14232 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14233 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14234
14235 if (bnxt_tc_flower_enabled(bp))
14236 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14237
14238 #ifdef CONFIG_RFS_ACCEL
14239 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14240 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14241 #endif /*CONFIG_RFS_ACCEL*/
14242
14243 if (bp->link_info.phy_retry) {
14244 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14245 bp->link_info.phy_retry = false;
14246 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14247 } else {
14248 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14249 }
14250 }
14251
14252 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14253 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14254
14255 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14256 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14257
14258 bnxt_restart_timer:
14259 mod_timer(&bp->timer, jiffies + bp->current_interval);
14260 }
14261
14262 static void bnxt_lock_sp(struct bnxt *bp)
14263 {
14264 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14265 * set. If the device is being closed, bnxt_close() may be holding
14266 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14267 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14268 * instance lock.
14269 */
14270 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14271 netdev_lock(bp->dev);
14272 }
14273
14274 static void bnxt_unlock_sp(struct bnxt *bp)
14275 {
14276 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14277 netdev_unlock(bp->dev);
14278 }
14279
14280 /* Only called from bnxt_sp_task() */
14281 static void bnxt_reset(struct bnxt *bp, bool silent)
14282 {
14283 bnxt_lock_sp(bp);
14284 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14285 bnxt_reset_task(bp, silent);
14286 bnxt_unlock_sp(bp);
14287 }
14288
14289 /* Only called from bnxt_sp_task() */
14290 static void bnxt_rx_ring_reset(struct bnxt *bp)
14291 {
14292 int i;
14293
14294 bnxt_lock_sp(bp);
14295 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14296 bnxt_unlock_sp(bp);
14297 return;
14298 }
14299 /* Disable and flush TPA before resetting the RX ring */
14300 if (bp->flags & BNXT_FLAG_TPA)
14301 bnxt_set_tpa(bp, false);
14302 for (i = 0; i < bp->rx_nr_rings; i++) {
14303 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14304 struct bnxt_cp_ring_info *cpr;
14305 int rc;
14306
14307 if (!rxr->bnapi->in_reset)
14308 continue;
14309
14310 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14311 if (rc) {
14312 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14313 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14314 else
14315 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14316 rc);
14317 bnxt_reset_task(bp, true);
14318 break;
14319 }
14320 bnxt_free_one_rx_ring_skbs(bp, rxr);
14321 rxr->rx_prod = 0;
14322 rxr->rx_agg_prod = 0;
14323 rxr->rx_sw_agg_prod = 0;
14324 rxr->rx_next_cons = 0;
14325 rxr->bnapi->in_reset = false;
14326 bnxt_alloc_one_rx_ring(bp, i);
14327 cpr = &rxr->bnapi->cp_ring;
14328 cpr->sw_stats->rx.rx_resets++;
14329 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14330 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14331 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14332 }
14333 if (bp->flags & BNXT_FLAG_TPA)
14334 bnxt_set_tpa(bp, true);
14335 bnxt_unlock_sp(bp);
14336 }
14337
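/* Quiesce the device after a fatal firmware error: stop TX and NAPI, disable
 * and free interrupts, and disable the PCI device to prevent further DMA.
 */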
14338 static void bnxt_fw_fatal_close(struct bnxt *bp)
14339 {
14340 bnxt_tx_disable(bp);
14341 bnxt_disable_napi(bp);
14342 bnxt_disable_int_sync(bp);
14343 bnxt_free_irq(bp);
14344 bnxt_clear_int_mode(bp);
14345 pci_disable_device(bp->pdev);
14346 }
14347
14348 static void bnxt_fw_reset_close(struct bnxt *bp)
14349 {
14350 /* When firmware is in fatal state, quiesce device and disable
14351 * bus master to prevent any potential bad DMAs before freeing
14352 * kernel memory.
14353 */
14354 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14355 u16 val = 0;
14356
14357 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14358 if (val == 0xffff)
14359 bp->fw_reset_min_dsecs = 0;
14360 bnxt_fw_fatal_close(bp);
14361 }
14362 __bnxt_close_nic(bp, true, false);
14363 bnxt_vf_reps_free(bp);
14364 bnxt_clear_int_mode(bp);
14365 bnxt_hwrm_func_drv_unrgtr(bp);
14366 if (pci_is_enabled(bp->pdev))
14367 pci_disable_device(bp->pdev);
14368 bnxt_free_ctx_mem(bp, false);
14369 }
14370
14371 static bool is_bnxt_fw_ok(struct bnxt *bp)
14372 {
14373 struct bnxt_fw_health *fw_health = bp->fw_health;
14374 bool no_heartbeat = false, has_reset = false;
14375 u32 val;
14376
14377 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14378 if (val == fw_health->last_fw_heartbeat)
14379 no_heartbeat = true;
14380
14381 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14382 if (val != fw_health->last_fw_reset_cnt)
14383 has_reset = true;
14384
14385 if (!no_heartbeat && has_reset)
14386 return true;
14387
14388 return false;
14389 }
14390
14391 /* netdev instance lock is acquired before calling this function */
14392 static void bnxt_force_fw_reset(struct bnxt *bp)
14393 {
14394 struct bnxt_fw_health *fw_health = bp->fw_health;
14395 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14396 u32 wait_dsecs;
14397
14398 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14399 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14400 return;
14401
14402 /* We have to serialize with bnxt_refclk_read() */
14403 if (ptp) {
14404 unsigned long flags;
14405
14406 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14407 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14408 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14409 } else {
14410 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14411 }
14412 bnxt_fw_reset_close(bp);
14413 wait_dsecs = fw_health->master_func_wait_dsecs;
14414 if (fw_health->primary) {
14415 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14416 wait_dsecs = 0;
14417 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14418 } else {
14419 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14420 wait_dsecs = fw_health->normal_func_wait_dsecs;
14421 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14422 }
14423
14424 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14425 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14426 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14427 }
14428
14429 void bnxt_fw_exception(struct bnxt *bp)
14430 {
14431 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14432 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14433 bnxt_ulp_stop(bp);
14434 bnxt_lock_sp(bp);
14435 bnxt_force_fw_reset(bp);
14436 bnxt_unlock_sp(bp);
14437 }
14438
14439 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14440 * < 0 on error.
14441 */
14442 static int bnxt_get_registered_vfs(struct bnxt *bp)
14443 {
14444 #ifdef CONFIG_BNXT_SRIOV
14445 int rc;
14446
14447 if (!BNXT_PF(bp))
14448 return 0;
14449
14450 rc = bnxt_hwrm_func_qcfg(bp);
14451 if (rc) {
14452 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14453 return rc;
14454 }
14455 if (bp->pf.registered_vfs)
14456 return bp->pf.registered_vfs;
14457 if (bp->sriov_cfg)
14458 return 1;
14459 #endif
14460 return 0;
14461 }
14462
14463 void bnxt_fw_reset(struct bnxt *bp)
14464 {
14465 bnxt_ulp_stop(bp);
14466 bnxt_lock_sp(bp);
14467 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14468 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14469 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14470 int n = 0, tmo;
14471
14472 /* We have to serialize with bnxt_refclk_read() */
14473 if (ptp) {
14474 unsigned long flags;
14475
14476 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14477 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14478 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14479 } else {
14480 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14481 }
14482 if (bp->pf.active_vfs &&
14483 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14484 n = bnxt_get_registered_vfs(bp);
14485 if (n < 0) {
14486 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14487 n);
14488 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14489 netif_close(bp->dev);
14490 goto fw_reset_exit;
14491 } else if (n > 0) {
14492 u16 vf_tmo_dsecs = n * 10;
14493
14494 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14495 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14496 bp->fw_reset_state =
14497 BNXT_FW_RESET_STATE_POLL_VF;
14498 bnxt_queue_fw_reset_work(bp, HZ / 10);
14499 goto fw_reset_exit;
14500 }
14501 bnxt_fw_reset_close(bp);
14502 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14503 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14504 tmo = HZ / 10;
14505 } else {
14506 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14507 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14508 }
14509 bnxt_queue_fw_reset_work(bp, tmo);
14510 }
14511 fw_reset_exit:
14512 bnxt_unlock_sp(bp);
14513 }
14514
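/* On P5+ chips, detect completion rings that have pending work but whose
 * consumer index has not advanced since the last check, and count them as
 * missed interrupts.
 */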
14515 static void bnxt_chk_missed_irq(struct bnxt *bp)
14516 {
14517 int i;
14518
14519 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14520 return;
14521
14522 for (i = 0; i < bp->cp_nr_rings; i++) {
14523 struct bnxt_napi *bnapi = bp->bnapi[i];
14524 struct bnxt_cp_ring_info *cpr;
14525 u32 fw_ring_id;
14526 int j;
14527
14528 if (!bnapi)
14529 continue;
14530
14531 cpr = &bnapi->cp_ring;
14532 for (j = 0; j < cpr->cp_ring_count; j++) {
14533 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14534 u32 val[2];
14535
14536 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14537 continue;
14538
14539 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14540 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14541 continue;
14542 }
14543 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14544 bnxt_dbg_hwrm_ring_info_get(bp,
14545 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14546 fw_ring_id, &val[0], &val[1]);
14547 cpr->sw_stats->cmn.missed_irqs++;
14548 }
14549 }
14550 }
14551
14552 static void bnxt_cfg_ntp_filters(struct bnxt *);
14553
14554 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14555 {
14556 struct bnxt_link_info *link_info = &bp->link_info;
14557
14558 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14559 link_info->autoneg = BNXT_AUTONEG_SPEED;
14560 if (bp->hwrm_spec_code >= 0x10201) {
14561 if (link_info->auto_pause_setting &
14562 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14563 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14564 } else {
14565 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14566 }
14567 bnxt_set_auto_speed(link_info);
14568 } else {
14569 bnxt_set_force_speed(link_info);
14570 link_info->req_duplex = link_info->duplex_setting;
14571 }
14572 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14573 link_info->req_flow_ctrl =
14574 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14575 else
14576 link_info->req_flow_ctrl = link_info->force_pause_setting;
14577 }
14578
14579 static void bnxt_fw_echo_reply(struct bnxt *bp)
14580 {
14581 struct bnxt_fw_health *fw_health = bp->fw_health;
14582 struct hwrm_func_echo_response_input *req;
14583 int rc;
14584
14585 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14586 if (rc)
14587 return;
14588 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14589 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14590 hwrm_req_send(bp, req);
14591 }
14592
14593 static void bnxt_ulp_restart(struct bnxt *bp)
14594 {
14595 bnxt_ulp_stop(bp);
14596 bnxt_ulp_start(bp, 0);
14597 }
14598
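/* Slow-path workqueue handler: services the deferred events queued via
 * bnxt_queue_sp_work(), e.g. RX mode changes, link updates, statistics,
 * ring resets and firmware resets.
 */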
14599 static void bnxt_sp_task(struct work_struct *work)
14600 {
14601 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14602
14603 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14604 smp_mb__after_atomic();
14605 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14606 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14607 return;
14608 }
14609
14610 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14611 bnxt_ulp_restart(bp);
14612 bnxt_reenable_sriov(bp);
14613 }
14614
14615 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14616 bnxt_cfg_rx_mode(bp);
14617
14618 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14619 bnxt_cfg_ntp_filters(bp);
14620 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14621 bnxt_hwrm_exec_fwd_req(bp);
14622 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14623 netdev_info(bp->dev, "Received PF driver unload event!\n");
14624 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14625 bnxt_hwrm_port_qstats(bp, 0);
14626 bnxt_hwrm_port_qstats_ext(bp, 0);
14627 bnxt_accumulate_all_stats(bp);
14628 }
14629
14630 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14631 int rc;
14632
14633 mutex_lock(&bp->link_lock);
14634 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14635 &bp->sp_event))
14636 bnxt_hwrm_phy_qcaps(bp);
14637
14638 rc = bnxt_update_link(bp, true);
14639 if (rc)
14640 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14641 rc);
14642
14643 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14644 &bp->sp_event))
14645 bnxt_init_ethtool_link_settings(bp);
14646 mutex_unlock(&bp->link_lock);
14647 }
14648 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14649 int rc;
14650
14651 mutex_lock(&bp->link_lock);
14652 rc = bnxt_update_phy_setting(bp);
14653 mutex_unlock(&bp->link_lock);
14654 if (rc) {
14655 netdev_warn(bp->dev, "update phy settings retry failed\n");
14656 } else {
14657 bp->link_info.phy_retry = false;
14658 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14659 }
14660 }
14661 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14662 mutex_lock(&bp->link_lock);
14663 bnxt_get_port_module_status(bp);
14664 mutex_unlock(&bp->link_lock);
14665 }
14666
14667 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14668 bnxt_tc_flow_stats_work(bp);
14669
14670 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14671 bnxt_chk_missed_irq(bp);
14672
14673 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14674 bnxt_fw_echo_reply(bp);
14675
14676 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14677 bnxt_hwmon_notify_event(bp);
14678
14679 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14680 * must be the last functions to be called before exiting.
14681 */
14682 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14683 bnxt_reset(bp, false);
14684
14685 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14686 bnxt_reset(bp, true);
14687
14688 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14689 bnxt_rx_ring_reset(bp);
14690
14691 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14692 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14693 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14694 bnxt_devlink_health_fw_report(bp);
14695 else
14696 bnxt_fw_reset(bp);
14697 }
14698
14699 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14700 if (!is_bnxt_fw_ok(bp))
14701 bnxt_devlink_health_fw_report(bp);
14702 }
14703
14704 smp_mb__before_atomic();
14705 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14706 }
14707
14708 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14709 int *max_cp);
14710
14711 /* Under netdev instance lock */
14712 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14713 int tx_xdp)
14714 {
14715 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14716 struct bnxt_hw_rings hwr = {0};
14717 int rx_rings = rx;
14718 int rc;
14719
14720 if (tcs)
14721 tx_sets = tcs;
14722
14723 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14724
14725 if (max_rx < rx_rings)
14726 return -ENOMEM;
14727
14728 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14729 rx_rings <<= 1;
14730
14731 hwr.rx = rx_rings;
14732 hwr.tx = tx * tx_sets + tx_xdp;
14733 if (max_tx < hwr.tx)
14734 return -ENOMEM;
14735
14736 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14737
14738 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14739 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14740 if (max_cp < hwr.cp)
14741 return -ENOMEM;
14742 hwr.stat = hwr.cp;
14743 if (BNXT_NEW_RM(bp)) {
14744 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14745 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14746 hwr.grp = rx;
14747 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14748 }
14749 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14750 hwr.cp_p5 = hwr.tx + rx;
14751 rc = bnxt_hwrm_check_rings(bp, &hwr);
14752 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14753 if (!bnxt_ulp_registered(bp->edev)) {
14754 hwr.cp += bnxt_get_ulp_msix_num(bp);
14755 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14756 }
14757 if (hwr.cp > bp->total_irqs) {
14758 int total_msix = bnxt_change_msix(bp, hwr.cp);
14759
14760 if (total_msix < hwr.cp) {
14761 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14762 hwr.cp, total_msix);
14763 rc = -ENOSPC;
14764 }
14765 }
14766 }
14767 return rc;
14768 }
14769
14770 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14771 {
14772 if (bp->bar2) {
14773 pci_iounmap(pdev, bp->bar2);
14774 bp->bar2 = NULL;
14775 }
14776
14777 if (bp->bar1) {
14778 pci_iounmap(pdev, bp->bar1);
14779 bp->bar1 = NULL;
14780 }
14781
14782 if (bp->bar0) {
14783 pci_iounmap(pdev, bp->bar0);
14784 bp->bar0 = NULL;
14785 }
14786 }
14787
14788 static void bnxt_cleanup_pci(struct bnxt *bp)
14789 {
14790 bnxt_unmap_bars(bp, bp->pdev);
14791 pci_release_regions(bp->pdev);
14792 if (pci_is_enabled(bp->pdev))
14793 pci_disable_device(bp->pdev);
14794 }
14795
14796 static void bnxt_init_dflt_coal(struct bnxt *bp)
14797 {
14798 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14799 struct bnxt_coal *coal;
14800 u16 flags = 0;
14801
14802 if (coal_cap->cmpl_params &
14803 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14804 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14805
14806 /* Tick values in microseconds.
14807 * 1 coal_buf x bufs_per_record = 1 completion record.
14808 */
14809 coal = &bp->rx_coal;
14810 coal->coal_ticks = 10;
14811 coal->coal_bufs = 30;
14812 coal->coal_ticks_irq = 1;
14813 coal->coal_bufs_irq = 2;
14814 coal->idle_thresh = 50;
14815 coal->bufs_per_record = 2;
14816 coal->budget = 64; /* NAPI budget */
14817 coal->flags = flags;
14818
14819 coal = &bp->tx_coal;
14820 coal->coal_ticks = 28;
14821 coal->coal_bufs = 30;
14822 coal->coal_ticks_irq = 2;
14823 coal->coal_bufs_irq = 2;
14824 coal->bufs_per_record = 1;
14825 coal->flags = flags;
14826
14827 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14828 }
14829
14830 /* FW that pre-reserves 1 VNIC per function */
14831 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14832 {
14833 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14834
14835 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14836 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14837 return true;
14838 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14839 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14840 return true;
14841 return false;
14842 }
14843
14844 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14845 {
14846 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14847 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14848 int rc;
14849
14850 bp->max_pfcwd_tmo_ms = 0;
14851 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14852 if (rc)
14853 return;
14854 resp = hwrm_req_hold(bp, req);
14855 rc = hwrm_req_send_silent(bp, req);
14856 if (!rc)
14857 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14858 hwrm_req_drop(bp, req);
14859 }
14860
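/* Phase 1 of firmware initialization: query the firmware version (retrying
 * through firmware recovery if needed), read the NVM config version, reset
 * the function and sync the firmware clock.
 */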
14861 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14862 {
14863 int rc;
14864
14865 bp->fw_cap = 0;
14866 rc = bnxt_hwrm_ver_get(bp);
14867 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14868 * so wait before continuing with recovery.
14869 */
14870 if (rc)
14871 msleep(100);
14872 bnxt_try_map_fw_health_reg(bp);
14873 if (rc) {
14874 rc = bnxt_try_recover_fw(bp);
14875 if (rc)
14876 return rc;
14877 rc = bnxt_hwrm_ver_get(bp);
14878 if (rc)
14879 return rc;
14880 }
14881
14882 bnxt_nvm_cfg_ver_get(bp);
14883
14884 rc = bnxt_hwrm_func_reset(bp);
14885 if (rc)
14886 return -ENODEV;
14887
14888 bnxt_hwrm_fw_set_time(bp);
14889 return 0;
14890 }
14891
14892 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14893 {
14894 int rc;
14895
14896 /* Get the MAX capabilities for this function */
14897 rc = bnxt_hwrm_func_qcaps(bp);
14898 if (rc) {
14899 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14900 rc);
14901 return -ENODEV;
14902 }
14903
14904 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14905 if (rc)
14906 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14907 rc);
14908
14909 if (bnxt_alloc_fw_health(bp)) {
14910 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14911 } else {
14912 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14913 if (rc)
14914 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14915 rc);
14916 }
14917
14918 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14919 if (rc)
14920 return -ENODEV;
14921
14922 rc = bnxt_alloc_crash_dump_mem(bp);
14923 if (rc)
14924 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14925 rc);
14926 if (!rc) {
14927 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14928 if (rc) {
14929 bnxt_free_crash_dump_mem(bp);
14930 netdev_warn(bp->dev,
14931 "hwrm crash dump mem failure rc: %d\n", rc);
14932 }
14933 }
14934
14935 if (bnxt_fw_pre_resv_vnics(bp))
14936 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14937
14938 bnxt_hwrm_pfcwd_qcaps(bp);
14939 bnxt_hwrm_func_qcfg(bp);
14940 bnxt_hwrm_vnic_qcaps(bp);
14941 bnxt_hwrm_port_led_qcaps(bp);
14942 bnxt_ethtool_init(bp);
14943 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14944 __bnxt_hwrm_ptp_qcfg(bp);
14945 bnxt_dcb_init(bp);
14946 bnxt_hwmon_init(bp);
14947 return 0;
14948 }
14949
14950 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14951 {
14952 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14953 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14954 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14955 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14956 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14957 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14958 bp->rss_hash_delta = bp->rss_hash_cfg;
14959 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14960 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14961 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14962 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14963 }
14964 }
14965
14966 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14967 {
14968 struct net_device *dev = bp->dev;
14969
14970 dev->hw_features &= ~NETIF_F_NTUPLE;
14971 dev->features &= ~NETIF_F_NTUPLE;
14972 bp->flags &= ~BNXT_FLAG_RFS;
14973 if (bnxt_rfs_supported(bp)) {
14974 dev->hw_features |= NETIF_F_NTUPLE;
14975 if (bnxt_rfs_capable(bp, false)) {
14976 bp->flags |= BNXT_FLAG_RFS;
14977 dev->features |= NETIF_F_NTUPLE;
14978 }
14979 }
14980 }
14981
14982 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14983 {
14984 struct pci_dev *pdev = bp->pdev;
14985
14986 bnxt_set_dflt_rss_hash_type(bp);
14987 bnxt_set_dflt_rfs(bp);
14988
14989 bnxt_get_wol_settings(bp);
14990 if (bp->flags & BNXT_FLAG_WOL_CAP)
14991 device_set_wakeup_enable(&pdev->dev, bp->wol);
14992 else
14993 device_set_wakeup_capable(&pdev->dev, false);
14994
14995 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14996 bnxt_hwrm_coal_params_qcaps(bp);
14997 }
14998
14999 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
15000
15001 int bnxt_fw_init_one(struct bnxt *bp)
15002 {
15003 int rc;
15004
15005 rc = bnxt_fw_init_one_p1(bp);
15006 if (rc) {
15007 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15008 return rc;
15009 }
15010 rc = bnxt_fw_init_one_p2(bp);
15011 if (rc) {
15012 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15013 return rc;
15014 }
15015 rc = bnxt_probe_phy(bp, false);
15016 if (rc)
15017 return rc;
15018 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15019 if (rc)
15020 return rc;
15021
15022 bnxt_fw_init_one_p3(bp);
15023 return 0;
15024 }
15025
15026 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15027 {
15028 struct bnxt_fw_health *fw_health = bp->fw_health;
15029 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15030 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15031 u32 reg_type, reg_off, delay_msecs;
15032
15033 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15034 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15035 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15036 switch (reg_type) {
15037 case BNXT_FW_HEALTH_REG_TYPE_CFG:
15038 pci_write_config_dword(bp->pdev, reg_off, val);
15039 break;
15040 case BNXT_FW_HEALTH_REG_TYPE_GRC:
15041 writel(reg_off & BNXT_GRC_BASE_MASK,
15042 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15043 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15044 fallthrough;
15045 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15046 writel(val, bp->bar0 + reg_off);
15047 break;
15048 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15049 writel(val, bp->bar1 + reg_off);
15050 break;
15051 }
15052 if (delay_msecs) {
15053 pci_read_config_dword(bp->pdev, 0, &val);
15054 msleep(delay_msecs);
15055 }
15056 }
15057
15058 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15059 {
15060 struct hwrm_func_qcfg_output *resp;
15061 struct hwrm_func_qcfg_input *req;
15062 bool result = true; /* firmware will enforce if unknown */
15063
15064 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15065 return result;
15066
15067 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15068 return result;
15069
15070 req->fid = cpu_to_le16(0xffff);
15071 resp = hwrm_req_hold(bp, req);
15072 if (!hwrm_req_send(bp, req))
15073 result = !!(le16_to_cpu(resp->flags) &
15074 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15075 hwrm_req_drop(bp, req);
15076 return result;
15077 }
15078
15079 static void bnxt_reset_all(struct bnxt *bp)
15080 {
15081 struct bnxt_fw_health *fw_health = bp->fw_health;
15082 int i, rc;
15083
15084 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15085 bnxt_fw_reset_via_optee(bp);
15086 bp->fw_reset_timestamp = jiffies;
15087 return;
15088 }
15089
15090 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15091 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15092 bnxt_fw_reset_writel(bp, i);
15093 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15094 struct hwrm_fw_reset_input *req;
15095
15096 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15097 if (!rc) {
15098 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15099 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15100 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15101 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15102 rc = hwrm_req_send(bp, req);
15103 }
15104 if (rc != -ENODEV)
15105 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15106 }
15107 bp->fw_reset_timestamp = jiffies;
15108 }
15109
15110 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15111 {
15112 return time_after(jiffies, bp->fw_reset_timestamp +
15113 (bp->fw_reset_max_dsecs * HZ / 10));
15114 }
15115
15116 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15117 {
15118 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15119 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15120 bnxt_dl_health_fw_status_update(bp, false);
15121 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15122 netif_close(bp->dev);
15123 }
15124
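/* Delayed work implementing the firmware reset state machine: polls VFs,
 * waits for firmware shutdown, triggers the reset, re-enables the PCI
 * device, polls for firmware readiness and finally reopens the NIC.
 */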
15125 static void bnxt_fw_reset_task(struct work_struct *work)
15126 {
15127 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15128 int rc = 0;
15129
15130 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15131 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15132 return;
15133 }
15134
15135 switch (bp->fw_reset_state) {
15136 case BNXT_FW_RESET_STATE_POLL_VF: {
15137 int n = bnxt_get_registered_vfs(bp);
15138 int tmo;
15139
15140 if (n < 0) {
15141 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15142 n, jiffies_to_msecs(jiffies -
15143 bp->fw_reset_timestamp));
15144 goto fw_reset_abort;
15145 } else if (n > 0) {
15146 if (bnxt_fw_reset_timeout(bp)) {
15147 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15148 bp->fw_reset_state = 0;
15149 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15150 n);
15151 goto ulp_start;
15152 }
15153 bnxt_queue_fw_reset_work(bp, HZ / 10);
15154 return;
15155 }
15156 bp->fw_reset_timestamp = jiffies;
15157 netdev_lock(bp->dev);
15158 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15159 bnxt_fw_reset_abort(bp, rc);
15160 netdev_unlock(bp->dev);
15161 goto ulp_start;
15162 }
15163 bnxt_fw_reset_close(bp);
15164 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15165 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15166 tmo = HZ / 10;
15167 } else {
15168 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15169 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15170 }
15171 netdev_unlock(bp->dev);
15172 bnxt_queue_fw_reset_work(bp, tmo);
15173 return;
15174 }
15175 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15176 u32 val;
15177
15178 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15179 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15180 !bnxt_fw_reset_timeout(bp)) {
15181 bnxt_queue_fw_reset_work(bp, HZ / 5);
15182 return;
15183 }
15184
15185 if (!bp->fw_health->primary) {
15186 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15187
15188 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15189 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15190 return;
15191 }
15192 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15193 }
15194 fallthrough;
15195 case BNXT_FW_RESET_STATE_RESET_FW:
15196 bnxt_reset_all(bp);
15197 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15198 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15199 return;
15200 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15201 bnxt_inv_fw_health_reg(bp);
15202 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15203 !bp->fw_reset_min_dsecs) {
15204 u16 val;
15205
15206 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15207 if (val == 0xffff) {
15208 if (bnxt_fw_reset_timeout(bp)) {
15209 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15210 rc = -ETIMEDOUT;
15211 goto fw_reset_abort;
15212 }
15213 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15214 return;
15215 }
15216 }
15217 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15218 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15219 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15220 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15221 bnxt_dl_remote_reload(bp);
15222 if (pci_enable_device(bp->pdev)) {
15223 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15224 rc = -ENODEV;
15225 goto fw_reset_abort;
15226 }
15227 pci_set_master(bp->pdev);
15228 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15229 fallthrough;
15230 case BNXT_FW_RESET_STATE_POLL_FW:
15231 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15232 rc = bnxt_hwrm_poll(bp);
15233 if (rc) {
15234 if (bnxt_fw_reset_timeout(bp)) {
15235 netdev_err(bp->dev, "Firmware reset aborted\n");
15236 goto fw_reset_abort_status;
15237 }
15238 bnxt_queue_fw_reset_work(bp, HZ / 5);
15239 return;
15240 }
15241 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15242 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15243 fallthrough;
15244 case BNXT_FW_RESET_STATE_OPENING:
15245 while (!netdev_trylock(bp->dev)) {
15246 bnxt_queue_fw_reset_work(bp, HZ / 10);
15247 return;
15248 }
15249 rc = bnxt_open(bp->dev);
15250 if (rc) {
15251 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15252 bnxt_fw_reset_abort(bp, rc);
15253 netdev_unlock(bp->dev);
15254 goto ulp_start;
15255 }
15256
15257 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15258 bp->fw_health->enabled) {
15259 bp->fw_health->last_fw_reset_cnt =
15260 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15261 }
15262 bp->fw_reset_state = 0;
15263 /* Make sure fw_reset_state is 0 before clearing the flag */
15264 smp_mb__before_atomic();
15265 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15266 bnxt_ptp_reapply_pps(bp);
15267 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15268 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15269 bnxt_dl_health_fw_recovery_done(bp);
15270 bnxt_dl_health_fw_status_update(bp, true);
15271 }
15272 netdev_unlock(bp->dev);
15273 bnxt_ulp_start(bp, 0);
15274 bnxt_reenable_sriov(bp);
15275 netdev_lock(bp->dev);
15276 bnxt_vf_reps_alloc(bp);
15277 bnxt_vf_reps_open(bp);
15278 netdev_unlock(bp->dev);
15279 break;
15280 }
15281 return;
15282
15283 fw_reset_abort_status:
15284 if (bp->fw_health->status_reliable ||
15285 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15286 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15287
15288 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15289 }
15290 fw_reset_abort:
15291 netdev_lock(bp->dev);
15292 bnxt_fw_reset_abort(bp, rc);
15293 netdev_unlock(bp->dev);
15294 ulp_start:
15295 bnxt_ulp_start(bp, rc);
15296 }
15297
15298 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15299 {
15300 int rc;
15301 struct bnxt *bp = netdev_priv(dev);
15302
15303 SET_NETDEV_DEV(dev, &pdev->dev);
15304
15305 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15306 rc = pci_enable_device(pdev);
15307 if (rc) {
15308 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15309 goto init_err;
15310 }
15311
15312 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15313 dev_err(&pdev->dev,
15314 "Cannot find PCI device base address, aborting\n");
15315 rc = -ENODEV;
15316 goto init_err_disable;
15317 }
15318
15319 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15320 if (rc) {
15321 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15322 goto init_err_disable;
15323 }
15324
15325 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15326 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15327 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15328 rc = -EIO;
15329 goto init_err_release;
15330 }
15331
15332 pci_set_master(pdev);
15333
15334 bp->dev = dev;
15335 bp->pdev = pdev;
15336
15337 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15338 * determines the BAR size.
15339 */
15340 bp->bar0 = pci_ioremap_bar(pdev, 0);
15341 if (!bp->bar0) {
15342 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15343 rc = -ENOMEM;
15344 goto init_err_release;
15345 }
15346
15347 bp->bar2 = pci_ioremap_bar(pdev, 4);
15348 if (!bp->bar2) {
15349 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15350 rc = -ENOMEM;
15351 goto init_err_release;
15352 }
15353
15354 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15355 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15356
15357 spin_lock_init(&bp->ntp_fltr_lock);
15358 #if BITS_PER_LONG == 32
15359 spin_lock_init(&bp->db_lock);
15360 #endif
15361
15362 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15363 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15364
15365 timer_setup(&bp->timer, bnxt_timer, 0);
15366 bp->current_interval = BNXT_TIMER_INTERVAL;
15367
15368 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15369 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15370
15371 clear_bit(BNXT_STATE_OPEN, &bp->state);
15372 return 0;
15373
15374 init_err_release:
15375 bnxt_unmap_bars(bp, pdev);
15376 pci_release_regions(pdev);
15377
15378 init_err_disable:
15379 pci_disable_device(pdev);
15380
15381 init_err:
15382 return rc;
15383 }
15384
15385 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15386 {
15387 struct sockaddr *addr = p;
15388 struct bnxt *bp = netdev_priv(dev);
15389 int rc = 0;
15390
15391 netdev_assert_locked(dev);
15392
15393 if (!is_valid_ether_addr(addr->sa_data))
15394 return -EADDRNOTAVAIL;
15395
15396 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15397 return 0;
15398
15399 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15400 if (rc)
15401 return rc;
15402
15403 eth_hw_addr_set(dev, addr->sa_data);
15404 bnxt_clear_usr_fltrs(bp, true);
15405 if (netif_running(dev)) {
15406 bnxt_close_nic(bp, false, false);
15407 rc = bnxt_open_nic(bp, false, false);
15408 }
15409
15410 return rc;
15411 }
15412
15413 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15414 {
15415 struct bnxt *bp = netdev_priv(dev);
15416
15417 netdev_assert_locked(dev);
15418
15419 if (netif_running(dev))
15420 bnxt_close_nic(bp, true, false);
15421
15422 WRITE_ONCE(dev->mtu, new_mtu);
15423
15424 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15425 * program is attached. We need to set the AGG rings settings and
15426 * rx_skb_func accordingly.
15427 */
15428 if (READ_ONCE(bp->xdp_prog))
15429 bnxt_set_rx_skb_mode(bp, true);
15430
15431 bnxt_set_ring_params(bp);
15432
15433 if (netif_running(dev))
15434 return bnxt_open_nic(bp, true, false);
15435
15436 return 0;
15437 }
15438
15439 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15440 {
15441 struct bnxt *bp = netdev_priv(dev);
15442 bool sh = false;
15443 int rc, tx_cp;
15444
15445 if (tc > bp->max_tc) {
15446 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15447 tc, bp->max_tc);
15448 return -EINVAL;
15449 }
15450
15451 if (bp->num_tc == tc)
15452 return 0;
15453
15454 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15455 sh = true;
15456
15457 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15458 sh, tc, bp->tx_nr_rings_xdp);
15459 if (rc)
15460 return rc;
15461
15462 /* Needs to close the device and do hw resource re-allocations */
15463 if (netif_running(bp->dev))
15464 bnxt_close_nic(bp, true, false);
15465
15466 if (tc) {
15467 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15468 netdev_set_num_tc(dev, tc);
15469 bp->num_tc = tc;
15470 } else {
15471 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15472 netdev_reset_tc(dev);
15473 bp->num_tc = 0;
15474 }
15475 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15476 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15477 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15478 tx_cp + bp->rx_nr_rings;
15479
15480 if (netif_running(bp->dev))
15481 return bnxt_open_nic(bp, true, false);
15482
15483 return 0;
15484 }
15485
15486 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15487 void *cb_priv)
15488 {
15489 struct bnxt *bp = cb_priv;
15490
15491 if (!bnxt_tc_flower_enabled(bp) ||
15492 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15493 return -EOPNOTSUPP;
15494
15495 switch (type) {
15496 case TC_SETUP_CLSFLOWER:
15497 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15498 default:
15499 return -EOPNOTSUPP;
15500 }
15501 }
15502
15503 LIST_HEAD(bnxt_block_cb_list);
15504
15505 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15506 void *type_data)
15507 {
15508 struct bnxt *bp = netdev_priv(dev);
15509
15510 switch (type) {
15511 case TC_SETUP_BLOCK:
15512 return flow_block_cb_setup_simple(type_data,
15513 &bnxt_block_cb_list,
15514 bnxt_setup_tc_block_cb,
15515 bp, bp, true);
15516 case TC_SETUP_QDISC_MQPRIO: {
15517 struct tc_mqprio_qopt *mqprio = type_data;
15518
15519 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15520
15521 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15522 }
15523 default:
15524 return -EOPNOTSUPP;
15525 }
15526 }
15527
15528 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15529 const struct sk_buff *skb)
15530 {
15531 struct bnxt_vnic_info *vnic;
15532
15533 if (skb)
15534 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15535
15536 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15537 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15538 }
15539
15540 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15541 u32 idx)
15542 {
15543 struct hlist_head *head;
15544 int bit_id;
15545
15546 spin_lock_bh(&bp->ntp_fltr_lock);
15547 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15548 if (bit_id < 0) {
15549 spin_unlock_bh(&bp->ntp_fltr_lock);
15550 return -ENOMEM;
15551 }
15552
15553 fltr->base.sw_id = (u16)bit_id;
15554 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15555 fltr->base.flags |= BNXT_ACT_RING_DST;
15556 head = &bp->ntp_fltr_hash_tbl[idx];
15557 hlist_add_head_rcu(&fltr->base.hash, head);
15558 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15559 bnxt_insert_usr_fltr(bp, &fltr->base);
15560 bp->ntp_fltr_count++;
15561 spin_unlock_bh(&bp->ntp_fltr_lock);
15562 return 0;
15563 }
15564
15565 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15566 struct bnxt_ntuple_filter *f2)
15567 {
15568 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15569 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15570 struct flow_keys *keys1 = &f1->fkeys;
15571 struct flow_keys *keys2 = &f2->fkeys;
15572
15573 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15574 keys1->basic.ip_proto != keys2->basic.ip_proto)
15575 return false;
15576
15577 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15578 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15579 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15580 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15581 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15582 return false;
15583 } else {
15584 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15585 &keys2->addrs.v6addrs.src) ||
15586 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15587 &masks2->addrs.v6addrs.src) ||
15588 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15589 &keys2->addrs.v6addrs.dst) ||
15590 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15591 &masks2->addrs.v6addrs.dst))
15592 return false;
15593 }
15594
15595 return keys1->ports.src == keys2->ports.src &&
15596 masks1->ports.src == masks2->ports.src &&
15597 keys1->ports.dst == keys2->ports.dst &&
15598 masks1->ports.dst == masks2->ports.dst &&
15599 keys1->control.flags == keys2->control.flags &&
15600 f1->l2_fltr == f2->l2_fltr;
15601 }
15602
15603 struct bnxt_ntuple_filter *
15604 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15605 struct bnxt_ntuple_filter *fltr, u32 idx)
15606 {
15607 struct bnxt_ntuple_filter *f;
15608 struct hlist_head *head;
15609
15610 head = &bp->ntp_fltr_hash_tbl[idx];
15611 hlist_for_each_entry_rcu(f, head, base.hash) {
15612 if (bnxt_fltr_match(f, fltr))
15613 return f;
15614 }
15615 return NULL;
15616 }
15617
15618 #ifdef CONFIG_RFS_ACCEL
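/* ndo_rx_flow_steer() handler for accelerated RFS: build an ntuple filter
 * from the skb's flow keys, look it up or insert it in the filter hash
 * table, and schedule the slow-path task to program it into the hardware.
 */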
15619 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15620 u16 rxq_index, u32 flow_id)
15621 {
15622 struct bnxt *bp = netdev_priv(dev);
15623 struct bnxt_ntuple_filter *fltr, *new_fltr;
15624 struct flow_keys *fkeys;
15625 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15626 struct bnxt_l2_filter *l2_fltr;
15627 int rc = 0, idx;
15628 u32 flags;
15629
15630 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15631 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15632 atomic_inc(&l2_fltr->refcnt);
15633 } else {
15634 struct bnxt_l2_key key;
15635
15636 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15637 key.vlan = 0;
15638 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15639 if (!l2_fltr)
15640 return -EINVAL;
15641 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15642 bnxt_del_l2_filter(bp, l2_fltr);
15643 return -EINVAL;
15644 }
15645 }
15646 new_fltr = kzalloc_obj(*new_fltr, GFP_ATOMIC);
15647 if (!new_fltr) {
15648 bnxt_del_l2_filter(bp, l2_fltr);
15649 return -ENOMEM;
15650 }
15651
15652 fkeys = &new_fltr->fkeys;
15653 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15654 rc = -EPROTONOSUPPORT;
15655 goto err_free;
15656 }
15657
15658 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15659 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15660 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15661 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15662 rc = -EPROTONOSUPPORT;
15663 goto err_free;
15664 }
15665 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15666 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15667 if (bp->hwrm_spec_code < 0x10601) {
15668 rc = -EPROTONOSUPPORT;
15669 goto err_free;
15670 }
15671 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15672 }
15673 flags = fkeys->control.flags;
15674 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15675 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15676 rc = -EPROTONOSUPPORT;
15677 goto err_free;
15678 }
15679 new_fltr->l2_fltr = l2_fltr;
15680
15681 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15682 rcu_read_lock();
15683 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15684 if (fltr) {
15685 rc = fltr->base.sw_id;
15686 rcu_read_unlock();
15687 goto err_free;
15688 }
15689 rcu_read_unlock();
15690
15691 new_fltr->flow_id = flow_id;
15692 new_fltr->base.rxq = rxq_index;
15693 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15694 if (!rc) {
15695 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15696 return new_fltr->base.sw_id;
15697 }
15698
15699 err_free:
15700 bnxt_del_l2_filter(bp, l2_fltr);
15701 kfree(new_fltr);
15702 return rc;
15703 }
15704 #endif
15705
15706 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15707 {
15708 spin_lock_bh(&bp->ntp_fltr_lock);
15709 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15710 spin_unlock_bh(&bp->ntp_fltr_lock);
15711 return;
15712 }
15713 hlist_del_rcu(&fltr->base.hash);
15714 bnxt_del_one_usr_fltr(bp, &fltr->base);
15715 bp->ntp_fltr_count--;
15716 spin_unlock_bh(&bp->ntp_fltr_lock);
15717 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15718 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15719 kfree_rcu(fltr, base.rcu);
15720 }
15721
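/* Commit newly inserted ntuple (aRFS) filters to the hardware and free
 * filters whose flows have expired.
 */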
15722 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15723 {
15724 #ifdef CONFIG_RFS_ACCEL
15725 int i;
15726
15727 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15728 struct hlist_head *head;
15729 struct hlist_node *tmp;
15730 struct bnxt_ntuple_filter *fltr;
15731 int rc;
15732
15733 head = &bp->ntp_fltr_hash_tbl[i];
15734 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15735 bool del = false;
15736
15737 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15738 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15739 continue;
15740 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15741 fltr->flow_id,
15742 fltr->base.sw_id)) {
15743 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15744 fltr);
15745 del = true;
15746 }
15747 } else {
15748 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15749 fltr);
15750 if (rc)
15751 del = true;
15752 else
15753 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15754 }
15755
15756 if (del)
15757 bnxt_del_ntp_filter(bp, fltr);
15758 }
15759 }
15760 #endif
15761 }
15762
15763 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15764 unsigned int entry, struct udp_tunnel_info *ti)
15765 {
15766 struct bnxt *bp = netdev_priv(netdev);
15767 unsigned int cmd;
15768
15769 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15770 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15771 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15772 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15773 else
15774 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15775
15776 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15777 }
15778
bnxt_udp_tunnel_unset_port(struct net_device * netdev,unsigned int table,unsigned int entry,struct udp_tunnel_info * ti)15779 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15780 unsigned int entry, struct udp_tunnel_info *ti)
15781 {
15782 struct bnxt *bp = netdev_priv(netdev);
15783 unsigned int cmd;
15784
15785 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15786 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15787 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15788 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15789 else
15790 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15791
15792 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15793 }
15794
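/* P7 devices support an additional VXLAN-GPE port table; older devices only
 * offload one VXLAN and one GENEVE port.
 */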
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.set_port = bnxt_udp_tunnel_set_port,
	.unset_port = bnxt_udp_tunnel_unset_port,
	.flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, bnxt_udp_tunnels_p7 = {
	.set_port = bnxt_udp_tunnel_set_port,
	.unset_port = bnxt_udp_tunnel_unset_port,
	.flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
	},
};

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		u16 mode;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open = bnxt_open,
	.ndo_start_xmit = bnxt_start_xmit,
	.ndo_stop = bnxt_close,
	.ndo_get_stats64 = bnxt_get_stats64,
	.ndo_set_rx_mode = bnxt_set_rx_mode,
	.ndo_eth_ioctl = bnxt_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = bnxt_change_mac_addr,
	.ndo_change_mtu = bnxt_change_mtu,
	.ndo_fix_features = bnxt_fix_features,
	.ndo_set_features = bnxt_set_features,
	.ndo_features_check = bnxt_features_check,
	.ndo_tx_timeout = bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config = bnxt_get_vf_config,
	.ndo_set_vf_mac = bnxt_set_vf_mac,
	.ndo_set_vf_vlan = bnxt_set_vf_vlan,
	.ndo_set_vf_rate = bnxt_set_vf_bw,
	.ndo_set_vf_link_state = bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
	.ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
	.ndo_bpf = bnxt_xdp,
	.ndo_xdp_xmit = bnxt_xdp_xmit,
	.ndo_bridge_getlink = bnxt_bridge_getlink,
	.ndo_bridge_setlink = bnxt_bridge_setlink,
	.ndo_hwtstamp_get = bnxt_hwtstamp_get,
	.ndo_hwtstamp_set = bnxt_hwtstamp_set,
};

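/* Per-queue RX stats: sum the unicast/multicast/broadcast counters from the
 * ring's software mirror of the hardware statistics block.
 */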
static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
				    struct netdev_queue_stats_rx *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_cp_ring_info *cpr;
	u64 *sw;

	if (!bp->bnapi)
		return;

	cpr = &bp->bnapi[i]->cp_ring;
	sw = cpr->stats.sw_stats;

	stats->packets = 0;
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);

	stats->bytes = 0;
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);

	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
	stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
	stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
}

static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
				    struct netdev_queue_stats_tx *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_napi *bnapi;
	u64 *sw;

	if (!bp->tx_ring)
		return;

	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
	sw = bnapi->cp_ring.stats.sw_stats;

	stats->packets = 0;
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);

	stats->bytes = 0;
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
}

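/* Base stats hold the counters accumulated before the last ring reset so
 * that the per-queue stats above remain additive across reconfigurations.
 */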
static void bnxt_get_base_stats(struct net_device *dev,
				struct netdev_queue_stats_rx *rx,
				struct netdev_queue_stats_tx *tx)
{
	struct bnxt *bp = netdev_priv(dev);

	rx->packets = bp->net_stats_prev.rx_packets;
	rx->bytes = bp->net_stats_prev.rx_bytes;
	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
	rx->hw_gro_packets = bp->ring_err_stats_prev.rx_total_hw_gro_packets;
	rx->hw_gro_wire_packets = bp->ring_err_stats_prev.rx_total_hw_gro_wire_packets;

	tx->packets = bp->net_stats_prev.tx_packets;
	tx->bytes = bp->net_stats_prev.tx_bytes;
}

static const struct netdev_stat_ops bnxt_stat_ops = {
	.get_queue_stats_rx = bnxt_get_queue_stats_rx,
	.get_queue_stats_tx = bnxt_get_queue_stats_tx,
	.get_base_stats = bnxt_get_base_stats,
};

static void bnxt_queue_default_qcfg(struct net_device *dev,
				    struct netdev_queue_config *qcfg)
{
	qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
}

static int bnxt_validate_qcfg(struct net_device *dev,
			      struct netdev_queue_config *qcfg,
			      struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	/* Older chips need the MSS calculation, so a non-default
	 * rx_page_size is not supported.
	 */
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(qcfg->rx_page_size))
		return -ERANGE;

	if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
	    qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
		return -ERANGE;

	return 0;
}

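/* Queue restart (ndo_queue_mem_alloc): stage a clone of the RX ring with
 * freshly allocated page pools, ring memory and buffers; the clone replaces
 * the live ring in bnxt_queue_start().
 */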
static int bnxt_queue_mem_alloc(struct net_device *dev,
				struct netdev_queue_config *qcfg,
				void *qmem, int idx)
{
	struct bnxt_rx_ring_info *rxr, *clone;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ring_struct *ring;
	int rc;

	if (!bp->rx_ring)
		return -ENETDOWN;

	rxr = &bp->rx_ring[idx];
	clone = qmem;
	memcpy(clone, rxr, sizeof(*rxr));
	bnxt_init_rx_ring_struct(bp, clone);
	bnxt_reset_rx_ring_struct(bp, clone);

	clone->rx_prod = 0;
	clone->rx_agg_prod = 0;
	clone->rx_sw_agg_prod = 0;
	clone->rx_next_cons = 0;
	clone->need_head_pool = false;
	clone->rx_page_size = qcfg->rx_page_size;

	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
	if (rc)
		return rc;

	rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
	if (rc < 0)
		goto err_page_pool_destroy;

	rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
					MEM_TYPE_PAGE_POOL,
					clone->page_pool);
	if (rc)
		goto err_rxq_info_unreg;

	ring = &clone->rx_ring_struct;
	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
	if (rc)
		goto err_free_rx_ring;

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ring = &clone->rx_agg_ring_struct;
		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			goto err_free_rx_agg_ring;

		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
		if (rc)
			goto err_free_rx_agg_ring;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_alloc_one_tpa_info(bp, clone);
		if (rc)
			goto err_free_tpa_info;
	}

	bnxt_init_one_rx_ring_rxbd(bp, clone);
	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);

	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_alloc_one_tpa_info_data(bp, clone);

	return 0;

err_free_tpa_info:
	bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
err_rxq_info_unreg:
	xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
	page_pool_destroy(clone->page_pool);
	page_pool_destroy(clone->head_pool);
	clone->page_pool = NULL;
	clone->head_pool = NULL;
	return rc;
}

static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
{
	struct bnxt_rx_ring_info *rxr = qmem;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ring_struct *ring;

	bnxt_free_one_rx_ring_skbs(bp, rxr);
	bnxt_free_one_tpa_info(bp, rxr);

	xdp_rxq_info_unreg(&rxr->xdp_rxq);

	page_pool_destroy(rxr->page_pool);
	page_pool_destroy(rxr->head_pool);
	rxr->page_pool = NULL;
	rxr->head_pool = NULL;

	ring = &rxr->rx_ring_struct;
	bnxt_free_ring(bp, &ring->ring_mem);

	ring = &rxr->rx_agg_ring_struct;
	bnxt_free_ring(bp, &ring->ring_mem);

	kfree(rxr->rx_agg_bmap);
	rxr->rx_agg_bmap = NULL;
}

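/* Transfer ring memory ownership from src to dst. Both rings must have been
 * allocated with identical parameters; the WARNs below check that.
 */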
static void bnxt_copy_rx_ring(struct bnxt *bp,
			      struct bnxt_rx_ring_info *dst,
			      struct bnxt_rx_ring_info *src)
{
	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
	struct bnxt_ring_struct *dst_ring, *src_ring;
	int i;

	dst_ring = &dst->rx_ring_struct;
	dst_rmem = &dst_ring->ring_mem;
	src_ring = &src->rx_ring_struct;
	src_rmem = &src_ring->ring_mem;

	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
	WARN_ON(dst_rmem->flags != src_rmem->flags);
	WARN_ON(dst_rmem->depth != src_rmem->depth);
	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);

	dst_rmem->pg_tbl = src_rmem->pg_tbl;
	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
	*dst_rmem->vmem = *src_rmem->vmem;
	for (i = 0; i < dst_rmem->nr_pages; i++) {
		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
	}

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return;

	dst_ring = &dst->rx_agg_ring_struct;
	dst_rmem = &dst_ring->ring_mem;
	src_ring = &src->rx_agg_ring_struct;
	src_rmem = &src_ring->ring_mem;

	dst->rx_page_size = src->rx_page_size;

	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
	WARN_ON(dst_rmem->flags != src_rmem->flags);
	WARN_ON(dst_rmem->depth != src_rmem->depth);
	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);

	dst_rmem->pg_tbl = src_rmem->pg_tbl;
	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
	*dst_rmem->vmem = *src_rmem->vmem;
	for (i = 0; i < dst_rmem->nr_pages; i++) {
		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
	}

	dst->rx_agg_bmap = src->rx_agg_bmap;
}

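/* Adopt the staged clone into the live ring, re-create the hardware rings
 * and re-enable NAPI and the VNICs for this queue index.
 */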
static int bnxt_queue_start(struct net_device *dev,
			    struct netdev_queue_config *qcfg,
			    void *qmem, int idx)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rx_ring_info *rxr, *clone;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_vnic_info *vnic;
	struct bnxt_napi *bnapi;
	int i, rc;
	u16 mru;

	rxr = &bp->rx_ring[idx];
	clone = qmem;

	rxr->rx_prod = clone->rx_prod;
	rxr->rx_agg_prod = clone->rx_agg_prod;
	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
	rxr->rx_next_cons = clone->rx_next_cons;
	rxr->rx_tpa = clone->rx_tpa;
	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
	rxr->page_pool = clone->page_pool;
	rxr->head_pool = clone->head_pool;
	rxr->xdp_rxq = clone->xdp_rxq;
	rxr->need_head_pool = clone->need_head_pool;

	bnxt_copy_rx_ring(bp, rxr, clone);

	bnapi = rxr->bnapi;
	cpr = &bnapi->cp_ring;

	/* All rings have been reserved and previously allocated.
	 * Reallocating with the same parameters should never fail.
	 */
	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
	if (rc)
		goto err_reset;

	if (bp->tph_mode) {
		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
		if (rc)
			goto err_reset;
	}

	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
	if (rc)
		goto err_reset;

	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		rc = bnxt_tx_queue_start(bp, idx);
		if (rc)
			goto err_reset;
	}

	bnxt_enable_rx_page_pool(rxr);
	napi_enable_locked(&bnapi->napi);
	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);

	mru = bp->dev->mtu + VLAN_ETH_HLEN;
	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
		if (rc)
			return rc;
	}
	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);

err_reset:
	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
		   rc);
	napi_enable_locked(&bnapi->napi);
	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	bnxt_reset_task(bp, true);
	return rc;
}

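/* Quiesce one RX queue: zero the VNIC MRUs so no new packets target it,
 * free the hardware rings, then hand the software state back to qmem.
 */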
static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_vnic_info *vnic;
	struct bnxt_napi *bnapi;
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
	}
	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
	/* Make sure NAPI sees that the VNIC is disabled */
	synchronize_net();
	rxr = &bp->rx_ring[idx];
	bnapi = rxr->bnapi;
	cpr = &bnapi->cp_ring;
	cancel_work_sync(&cpr->dim.work);
	bnxt_hwrm_rx_ring_free(bp, rxr, false);
	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
	page_pool_disable_direct_recycling(rxr->page_pool);
	if (bnxt_separate_head_pool(rxr))
		page_pool_disable_direct_recycling(rxr->head_pool);

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		bnxt_tx_queue_stop(bp, idx);

	/* Disable NAPI now after freeing the rings because HWRM_RING_FREE
	 * completion is handled in NAPI to guarantee no more DMA on that ring
	 * after seeing the completion.
	 */
	napi_disable_locked(&bnapi->napi);

	if (bp->tph_mode) {
		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
	}
	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	memcpy(qmem, rxr, sizeof(*rxr));
	bnxt_init_rx_ring_struct(bp, qmem);

	return 0;
}

static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
	.ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
	.ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
	.ndo_queue_mem_free = bnxt_queue_mem_free,
	.ndo_queue_start = bnxt_queue_start,
	.ndo_queue_stop = bnxt_queue_stop,
	.ndo_default_qcfg = bnxt_queue_default_qcfg,
	.ndo_validate_qcfg = bnxt_validate_qcfg,
	.supported_params = QCFG_RX_PAGE_SIZE,
};

static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
	.ndo_default_qcfg = bnxt_queue_default_qcfg,
};

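/* PCI removal: tear down in roughly the reverse order of bnxt_init_one(). */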
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		__bnxt_sriov_disable(bp);

	bnxt_rdma_aux_device_del(bp);

	unregister_netdev(dev);
	bnxt_ptp_clear(bp);

	bnxt_rdma_aux_device_uninit(bp);

	bnxt_free_l2_filters(bp, true);
	bnxt_free_ntp_fltrs(bp, true);
	WARN_ON(bp->num_rss_ctx);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp, true);
	bnxt_free_crash_dump_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	bp->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
	else
		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;

	bp->mac_flags = 0;
	bnxt_hwrm_mac_qcaps(bp);

	if (!fw_dflt)
		return 0;

	mutex_lock(&bp->link_lock);
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	mutex_unlock(&bp->link_lock);
	return 0;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

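/* Compute the maximum RX/TX/completion rings usable by the L2 driver after
 * subtracting MSI-X vectors and stat contexts in use by the RoCE ULP.
 */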
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num_in_use(bp),
			hw_resc->max_stat_ctxs -
			bnxt_get_ulp_stat_ctxs_in_use(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		int rc;

		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		if (rc) {
			*max_rx = 0;
			*max_tx = 0;
		}
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	int avail_msix;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);

	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);

		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
		bnxt_set_dflt_ulp_stat_ctxs(bp);
	}

	rc = __bnxt_reserve_rings(bp);
	if (rc && rc != -ENODEV)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc && rc != -ENODEV)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);

	bnxt_set_dflt_rfs(bp);

init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	netdev_ops_assert_locked(bp->dev);
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			netif_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

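/* PF: use the permanent MAC from firmware.  VF: use the administratively
 * assigned MAC if one exists, otherwise fall back to a random address and
 * let the PF approve it.
 */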
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(bp->dev, vf->mac_addr);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	unsigned int vpd_size, kw_len;
	int pos, size;
	u8 *vpd_data;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
		return;
	}

	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (pos < 0)
		goto read_sn;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
	if (pos < 0)
		goto exit;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

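/* Map the doorbell BAR (BAR 2); bp->db_size was sized earlier from firmware
 * capabilities.
 */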
static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

void bnxt_print_device_info(struct bnxt *bp)
{
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	pcie_print_link_status(bp->pdev);
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct bnxt_hw_resc *hw_resc;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (!pdev->msix_cap) {
		dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
		return -ENODEV;
	}

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
				 max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bp->board_idx = ent->driver_data;
	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(bp->board_idx))
		bp->flags |= BNXT_FLAG_VF;

	/* No devlink port registration in case of a VF */
	if (BNXT_PF(bp))
		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->stat_ops = &bnxt_stat_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	if (BNXT_CHIP_P5_PLUS(bp)) {
		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
		if (BNXT_CHIP_P7(bp))
			bp->flags |= BNXT_FLAG_CHIP_P7;
	}

	rc = bnxt_alloc_rss_indir_tbl(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;
	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
		dev->hw_features |= NETIF_F_GSO_UDP_L4;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
	else
		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;

	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
	if (bp->tso_max_segs)
		netif_set_tso_max_segs(dev, bp->tso_max_segs);

	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5_PLUS(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	hw_resc = &bp->hw_resc;
	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
		       BNXT_L2_FLTR_MAX_FLTR;
	/* Older firmware may not report these filters properly */
	if (bp->max_fltr < BNXT_MAX_FLTR)
		bp->max_fltr = BNXT_MAX_FLTR;
	bnxt_init_l2_fltr_tbl(bp);
	__bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_init_ring_params(bp);
	bnxt_set_ring_params(bp);
	bnxt_rdma_aux_device_init(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	INIT_LIST_HEAD(&bp->usr_fltr_list);

	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;

	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
	if (BNXT_SUPPORTS_QUEUE_API(bp))
		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
	dev->netmem_tx = true;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_rdma_aux_device_add(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);

	return 0;
init_err_cleanup:
	bnxt_rdma_aux_device_uninit(bp);
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp, true);
	bnxt_free_crash_dump_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	netdev_lock(dev);
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		netif_close(dev);

	if (bnxt_hwrm_func_drv_unrgtr(bp)) {
		pcie_flr(pdev);
		goto shutdown_exit;
	}
	bnxt_ptp_clear(bp);
	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	netdev_unlock(dev);
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
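/* System suspend: detach and close the netdev, unregister from firmware and
 * release context memory; resume re-registers with firmware and reopens.
 */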
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	bnxt_ulp_stop(bp);

	netdev_lock(dev);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_ptp_clear(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp, false);
	netdev_unlock(dev);
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	netdev_lock(dev);
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	bnxt_clear_reservations(bp, true);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	if (bp->fw_crash_mem)
		bnxt_hwrm_crash_dump_mem_cfg(bp);

	if (bnxt_ptp_init(bp)) {
		kfree(bp->ptp_cfg);
		bp->ptp_cfg = NULL;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	netdev_unlock(bp->dev);
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	bool abort = false;

	netdev_info(netdev, "PCI I/O error detected\n");

	bnxt_ulp_stop(bp);

	netdev_lock(netdev);
	netif_device_detach(netdev);

	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "Firmware reset already in progress\n");
		abort = true;
	}

	if (abort || state == pci_channel_io_perm_failure) {
		netdev_unlock(netdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Link is not reliable anymore if state is pci_channel_io_frozen
	 * so we disable bus master to prevent any potential bad DMAs before
	 * freeing kernel memory.
	 */
	if (state == pci_channel_io_frozen) {
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
		bnxt_fw_fatal_close(bp);
	}

	if (netif_running(netdev))
		__bnxt_close_nic(bp, true, true);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp, false);
	netdev_unlock(netdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
		msleep(900);

	netdev_lock(netdev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is only restored by rewriting
		 * the BARs.
		 *
		 * Since pci_restore_state() does not re-write a BAR whose
		 * value matches the previously saved value, the driver writes
		 * the BARs with 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		/* IRQ will be initialized later in bnxt_io_resume */
		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
	}

reset_exit:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	bnxt_clear_reservations(bp, true);
	netdev_unlock(netdev);

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	netdev_lock(netdev);

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err) {
		if (netif_running(netdev)) {
			err = bnxt_open(netdev);
		} else {
			err = bnxt_reserve_rings(bp, true);
			if (!err)
				err = bnxt_init_int_mode(bp);
		}
	}

	if (!err)
		netif_device_attach(netdev);

	netdev_unlock(netdev);
	bnxt_ulp_start(bp, err);
	if (!err)
		bnxt_reenable_sriov(bp);
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected = bnxt_io_error_detected,
	.slot_reset = bnxt_io_slot_reset,
	.resume = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnxt_pci_tbl,
	.probe = bnxt_init_one,
	.remove = bnxt_remove_one,
	.shutdown = bnxt_shutdown,
	.driver.pm = BNXT_PM_OPS,
	.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);
