/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}

const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
	.ports = {
		.src = 0,
		.dst = 0,
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_NONE,
			.dst = BNXT_IPV6_MASK_NONE,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_ALL,
			.dst = BNXT_IPV6_MASK_ALL,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

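/* Return true if the board index from the PCI device table refers to a
 * Virtual Function.
 */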
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

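/* Schedule the FW reset task, but only while BNXT_STATE_IN_FW_RESET is
 * set.  The PF uses its own dedicated workqueue; VFs use the system one.
 */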
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

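/* Mark this NAPI's RX ring for reset and kick the slow path task.
 * rx_next_cons is set to an invalid index so that further completions
 * on this ring are ignored until the reset completes.
 */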
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

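/* TX length hint lookup table, indexed by packet length in 512-byte
 * units (length >> 9); 2KB and larger all share the largest hint.
 */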
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

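/* Main transmit entry point.  Small packets on an empty ring may use
 * the push doorbell mode, where the BDs and packet data are written
 * directly through the doorbell BAR; everything else is DMA-mapped
 * and posted as long TX BDs below.
 */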
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	struct pci_dev *pdev = bp->pdev;
	u16 prod, last_frag, txts_prod;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
	    ptp->tx_tstamp_en) {
		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
			tx_buf->is_ts_pkt = 1;
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		} else if (!skb_is_gso(skb)) {
			u16 seq_id, hdr_off;

			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
				if (vlan_tag_flags)
					hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				tx_buf->is_ts_pkt = 1;
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

				ptp->txts_req[txts_prod].tx_seqid = seq_id;
				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
				tx_buf->txts_prod = txts_prod;
			}
		}
	}
	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
		prod = NEXT_TX(prod);
		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
				    DB_RING_IDX(&txr->tx_db, prod));
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

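/* Normal (non-push) TX path: pad short packets, DMA-map the linear
 * data and each fragment, and build one long TX BD plus an extended
 * BD carrying the offload flags.
 */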
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
		u32 hdr_len;

		if (skb->encapsulation) {
			if (udp_gso)
				hdr_len = skb_inner_transport_offset(skb) +
					  sizeof(struct udphdr);
			else
				hdr_len = skb_inner_tcp_all_headers(skb);
		} else if (udp_gso) {
			hdr_len = skb_transport_offset(skb) +
				  sizeof(struct udphdr);
		} else {
			hdr_len = skb_tcp_all_headers(skb);
		}

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	txbd0 = txbd;
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		bnxt_txr_db_kick(bp, txr, prod);
	} else {
		if (free_size >= bp->tx_wake_thresh)
			txbd0->tx_bd_len_flags_type |=
				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
		txr->kick_pending = 1;
	}

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push) {
			txbd0->tx_bd_len_flags_type &=
				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
			bnxt_txr_db_kick(bp, txr, prod);
		}

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (BNXT_TX_PTP_IS_SET(lflags)) {
		txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
			/* set SKB to err so PTP worker will clean up */
			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
	}
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

/* Returns true if some remaining TX packets not processed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	struct pci_dev *pdev = bp->pdev;
	u16 hw_cons = txr->tx_hw_cons;
	unsigned int tx_bytes = 0;
	u16 cons = txr->tx_cons;
	int tx_pkts = 0;
	bool rc = false;

	while (RING_TX(bp, cons) != hw_cons) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		bool is_ts_pkt;
		int j, last;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
		skb = tx_buf->skb;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, cons);
			return rc;
		}

		is_ts_pkt = tx_buf->is_ts_pkt;
		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
			rc = true;
			break;
		}

		cons = NEXT_TX(cons);
		tx_pkts++;
		tx_bytes += skb->len;
		tx_buf->skb = NULL;
		tx_buf->is_ts_pkt = 0;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(is_ts_pkt)) {
			if (BNXT_CHIP_P5(bp)) {
				/* PTP worker takes ownership of the skb */
				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
				skb = NULL;
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);

	return rc;
}

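/* Service TX completions on every TX ring owned by this NAPI. */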
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr;
	bool more = false;
	int i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
			more |= __bnxt_tx_int(bp, txr, budget);
	}
	if (!more)
		bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

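/* Allocate an RX page from the ring's page pool (a page fragment when
 * PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE) and return its DMA address via
 * @mapping.
 */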
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

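/* Allocate and map a receive buffer (a page in page mode, otherwise a
 * frag) and program it into the RX BD at @prod.
 */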
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

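/* Recycle the buffer from @cons by reposting it, with its existing DMA
 * mapping, at the current RX producer index.
 */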
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt *bp = rxr->bnapi->bp;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

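/* Return @agg_bufs aggregation buffers, taken from the completion ring
 * (or the TPA agg array on P5+ chips), to the aggregation ring.
 */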
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

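/* Gather @agg_bufs aggregation buffers into @shinfo frags, replenishing
 * the aggregation ring along the way.  Returns the total fragment
 * length, or 0 if an allocation fails and the buffers are recycled.
 */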
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
				      unsigned int len,
				      dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
				     unsigned int len,
				     dma_addr_t mapping)
{
	return bnxt_copy_data(bnapi, data, len, mapping);
}

static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
				     struct xdp_buff *xdp,
				     unsigned int len,
				     dma_addr_t mapping)
{
	unsigned int metasize = 0;
	u8 *data = xdp->data;
	struct sk_buff *skb;

	len = xdp->data_end - xdp->data_meta;
	metasize = xdp->data - xdp->data_meta;
	data = xdp->data_meta;

	skb = bnxt_copy_data(bnapi, data, len, mapping);
	if (!skb)
		return skb;

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

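/* Map a hardware TPA aggregation ID to a free software index and
 * remember the mapping for lookup when the aggregation completes.
 */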
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	tpa_info->vlan_valid = 0;
	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		tpa_info->vlan_valid = 1;
		tpa_info->metadata =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	}
}

static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->vlan_valid = 0;
	if (TPA_START_VLAN_VALID(tpa_start)) {
		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
		u32 vlan_proto = ETH_P_8021Q;

		tpa_info->vlan_valid = 1;
		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
			vlan_proto = ETH_P_8021AD;
		tpa_info->metadata = vlan_proto << 16 |
				     TPA_START_METADATA0_TCI(tpa_start1);
	}
}

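/* Handle a TPA_START completion: swap the current RX buffer into the
 * per-aggregation tpa_info and repost the previous TPA buffer on the
 * RX ring, then record the hash, metadata and header info needed at
 * TPA_END time.
 */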
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset_rxr(bp, rxr);
		return;
	}
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			    RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		if (TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
			 TPA_START_HASH_TYPE(tpa_start) == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
	else
		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = RING_RX(bp, NEXT_RX(cons));
	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

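/* GRO fixup for 5731X chips: set the network/transport header offsets
 * (compensating for internal loopback packets) and prime the TCP
 * pseudo-header checksum on the aggregated packet.
 */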
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6. If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
1608 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1609 loopback = true;
1610 }
1611 if (loopback) {
1612 /* internal loopback packet, subtract all offsets by 4 */
1613 inner_ip_off -= 4;
1614 inner_mac_off -= 4;
1615 outer_ip_off -= 4;
1616 }
1617
1618 nw_off = inner_ip_off - ETH_HLEN;
1619 skb_set_network_header(skb, nw_off);
1620 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1621 struct ipv6hdr *iph = ipv6_hdr(skb);
1622
1623 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1624 len = skb->len - skb_transport_offset(skb);
1625 th = tcp_hdr(skb);
1626 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1627 } else {
1628 struct iphdr *iph = ip_hdr(skb);
1629
1630 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1631 len = skb->len - skb_transport_offset(skb);
1632 th = tcp_hdr(skb);
1633 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1634 }
1635
1636 if (inner_mac_off) { /* tunnel */
1637 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1638 ETH_HLEN - 2));
1639
1640 bnxt_gro_tunnel(skb, proto);
1641 }
1642 #endif
1643 return skb;
1644 }
1645
1646 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1647 int payload_off, int tcp_ts,
1648 struct sk_buff *skb)
1649 {
1650 #ifdef CONFIG_INET
1651 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1652 u32 hdr_info = tpa_info->hdr_info;
1653 int iphdr_len, nw_off;
1654
1655 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1656 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1657 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1658
1659 nw_off = inner_ip_off - ETH_HLEN;
1660 skb_set_network_header(skb, nw_off);
1661 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1662 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1663 skb_set_transport_header(skb, nw_off + iphdr_len);
1664
1665 if (inner_mac_off) { /* tunnel */
1666 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1667 ETH_HLEN - 2));
1668
1669 bnxt_gro_tunnel(skb, proto);
1670 }
1671 #endif
1672 return skb;
1673 }
1674
1675 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1676 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1677
1678 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1679 int payload_off, int tcp_ts,
1680 struct sk_buff *skb)
1681 {
1682 #ifdef CONFIG_INET
1683 struct tcphdr *th;
1684 int len, nw_off, tcp_opt_len = 0;
1685
1686 if (tcp_ts)
1687 tcp_opt_len = 12;
1688
1689 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1690 struct iphdr *iph;
1691
1692 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1693 ETH_HLEN;
1694 skb_set_network_header(skb, nw_off);
1695 iph = ip_hdr(skb);
1696 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1697 len = skb->len - skb_transport_offset(skb);
1698 th = tcp_hdr(skb);
1699 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1700 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1701 struct ipv6hdr *iph;
1702
1703 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1704 ETH_HLEN;
1705 skb_set_network_header(skb, nw_off);
1706 iph = ipv6_hdr(skb);
1707 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1708 len = skb->len - skb_transport_offset(skb);
1709 th = tcp_hdr(skb);
1710 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1711 } else {
1712 dev_kfree_skb_any(skb);
1713 return NULL;
1714 }
1715
1716 if (nw_off) /* tunnel */
1717 bnxt_gro_tunnel(skb, skb->protocol);
1718 #endif
1719 return skb;
1720 }
1721
1722 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1723 struct bnxt_tpa_info *tpa_info,
1724 struct rx_tpa_end_cmp *tpa_end,
1725 struct rx_tpa_end_cmp_ext *tpa_end1,
1726 struct sk_buff *skb)
1727 {
1728 #ifdef CONFIG_INET
1729 int payload_off;
1730 u16 segs;
1731
1732 segs = TPA_END_TPA_SEGS(tpa_end);
1733 if (segs == 1)
1734 return skb;
1735
1736 NAPI_GRO_CB(skb)->count = segs;
1737 skb_shinfo(skb)->gso_size =
1738 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1739 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1740 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1741 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1742 else
1743 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1744 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1745 if (likely(skb))
1746 tcp_gro_complete(skb);
1747 #endif
1748 return skb;
1749 }
1750
1751 /* Given the cfa_code of a received packet, determine which
1752  * netdev (vf-rep or PF) the packet is destined for.
1753  */
1754 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1755 {
1756 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1757
1758 /* if vf-rep dev is NULL, the packet must belong to the PF */
1759 return dev ? dev : bp->dev;
1760 }
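/* A minimal usage sketch (illustrative): the cfa_code comes from the
 * RX/TPA completion record, and eth_type_trans() is then run against
 * whichever netdev the lookup selects, as the callers below do.
 */
#if 0
struct net_device *dev = bnxt_get_pkt_dev(bp, cfa_code);

skb->protocol = eth_type_trans(skb, dev);
#endif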
1761
1762 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1763 struct bnxt_cp_ring_info *cpr,
1764 u32 *raw_cons,
1765 struct rx_tpa_end_cmp *tpa_end,
1766 struct rx_tpa_end_cmp_ext *tpa_end1,
1767 u8 *event)
1768 {
1769 struct bnxt_napi *bnapi = cpr->bnapi;
1770 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1771 struct net_device *dev = bp->dev;
1772 u8 *data_ptr, agg_bufs;
1773 unsigned int len;
1774 struct bnxt_tpa_info *tpa_info;
1775 dma_addr_t mapping;
1776 struct sk_buff *skb;
1777 u16 idx = 0, agg_id;
1778 void *data;
1779 bool gro;
1780
1781 if (unlikely(bnapi->in_reset)) {
1782 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1783
1784 if (rc < 0)
1785 return ERR_PTR(-EBUSY);
1786 return NULL;
1787 }
1788
1789 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1790 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1791 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1792 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1793 tpa_info = &rxr->rx_tpa[agg_id];
1794 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1795 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1796 agg_bufs, tpa_info->agg_count);
1797 agg_bufs = tpa_info->agg_count;
1798 }
1799 tpa_info->agg_count = 0;
1800 *event |= BNXT_AGG_EVENT;
1801 bnxt_free_agg_idx(rxr, agg_id);
1802 idx = agg_id;
1803 gro = !!(bp->flags & BNXT_FLAG_GRO);
1804 } else {
1805 agg_id = TPA_END_AGG_ID(tpa_end);
1806 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1807 tpa_info = &rxr->rx_tpa[agg_id];
1808 idx = RING_CMP(*raw_cons);
1809 if (agg_bufs) {
1810 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1811 return ERR_PTR(-EBUSY);
1812
1813 *event |= BNXT_AGG_EVENT;
1814 idx = NEXT_CMP(idx);
1815 }
1816 gro = !!TPA_END_GRO(tpa_end);
1817 }
1818 data = tpa_info->data;
1819 data_ptr = tpa_info->data_ptr;
1820 prefetch(data_ptr);
1821 len = tpa_info->len;
1822 mapping = tpa_info->mapping;
1823
1824 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1825 bnxt_abort_tpa(cpr, idx, agg_bufs);
1826 if (agg_bufs > MAX_SKB_FRAGS)
1827 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1828 agg_bufs, (int)MAX_SKB_FRAGS);
1829 return NULL;
1830 }
1831
1832 if (len <= bp->rx_copy_thresh) {
1833 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1834 if (!skb) {
1835 bnxt_abort_tpa(cpr, idx, agg_bufs);
1836 cpr->sw_stats->rx.rx_oom_discards += 1;
1837 return NULL;
1838 }
1839 } else {
1840 u8 *new_data;
1841 dma_addr_t new_mapping;
1842
1843 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1844 if (!new_data) {
1845 bnxt_abort_tpa(cpr, idx, agg_bufs);
1846 cpr->sw_stats->rx.rx_oom_discards += 1;
1847 return NULL;
1848 }
1849
1850 tpa_info->data = new_data;
1851 tpa_info->data_ptr = new_data + bp->rx_offset;
1852 tpa_info->mapping = new_mapping;
1853
1854 skb = napi_build_skb(data, bp->rx_buf_size);
1855 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1856 bp->rx_buf_use_size, bp->rx_dir,
1857 DMA_ATTR_WEAK_ORDERING);
1858
1859 if (!skb) {
1860 skb_free_frag(data);
1861 bnxt_abort_tpa(cpr, idx, agg_bufs);
1862 cpr->sw_stats->rx.rx_oom_discards += 1;
1863 return NULL;
1864 }
1865 skb_reserve(skb, bp->rx_offset);
1866 skb_put(skb, len);
1867 }
1868
1869 if (agg_bufs) {
1870 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1871 if (!skb) {
1872 /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1873 cpr->sw_stats->rx.rx_oom_discards += 1;
1874 return NULL;
1875 }
1876 }
1877
1878 if (tpa_info->cfa_code_valid)
1879 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1880 skb->protocol = eth_type_trans(skb, dev);
1881
1882 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1883 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1884
1885 if (tpa_info->vlan_valid &&
1886 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1887 __be16 vlan_proto = htons(tpa_info->metadata >>
1888 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1889 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1890
1891 if (eth_type_vlan(vlan_proto)) {
1892 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1893 } else {
1894 dev_kfree_skb(skb);
1895 return NULL;
1896 }
1897 }
1898
1899 skb_checksum_none_assert(skb);
1900 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1901 skb->ip_summed = CHECKSUM_UNNECESSARY;
1902 skb->csum_level =
1903 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1904 }
1905
1906 if (gro)
1907 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1908
1909 return skb;
1910 }
1911
1912 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1913 struct rx_agg_cmp *rx_agg)
1914 {
1915 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1916 struct bnxt_tpa_info *tpa_info;
1917
1918 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1919 tpa_info = &rxr->rx_tpa[agg_id];
1920 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1921 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1922 }
1923
1924 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1925 struct sk_buff *skb)
1926 {
1927 skb_mark_for_recycle(skb);
1928
1929 if (skb->dev != bp->dev) {
1930 /* this packet belongs to a vf-rep */
1931 bnxt_vf_rep_rx(bp, skb);
1932 return;
1933 }
1934 skb_record_rx_queue(skb, bnapi->index);
1935 napi_gro_receive(&bnapi->napi, skb);
1936 }
1937
1938 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1939 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1940 {
1941 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1942
1943 if (BNXT_PTP_RX_TS_VALID(flags))
1944 goto ts_valid;
1945 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1946 return false;
1947
1948 ts_valid:
1949 *cmpl_ts = ts;
1950 return true;
1951 }
1952
1953 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1954 struct rx_cmp *rxcmp,
1955 struct rx_cmp_ext *rxcmp1)
1956 {
1957 __be16 vlan_proto;
1958 u16 vtag;
1959
1960 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1961 __le32 flags2 = rxcmp1->rx_cmp_flags2;
1962 u32 meta_data;
1963
1964 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1965 return skb;
1966
1967 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1968 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1969 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1970 if (eth_type_vlan(vlan_proto))
1971 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1972 else
1973 goto vlan_err;
1974 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1975 if (RX_CMP_VLAN_VALID(rxcmp)) {
1976 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1977
1978 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1979 vlan_proto = htons(ETH_P_8021Q);
1980 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1981 vlan_proto = htons(ETH_P_8021AD);
1982 else
1983 goto vlan_err;
1984 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1985 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1986 }
1987 }
1988 return skb;
1989 vlan_err:
1990 dev_kfree_skb(skb);
1991 return NULL;
1992 }
1993
1994 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1995 struct rx_cmp *rxcmp)
1996 {
1997 u8 ext_op;
1998
1999 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2000 switch (ext_op) {
2001 case EXT_OP_INNER_4:
2002 case EXT_OP_OUTER_4:
2003 case EXT_OP_INNFL_3:
2004 case EXT_OP_OUTFL_3:
2005 return PKT_HASH_TYPE_L4;
2006 default:
2007 return PKT_HASH_TYPE_L3;
2008 }
2009 }
2010
2011 /* returns the following:
2012 * 1 - 1 packet successfully received
2013 * 0 - successful TPA_START, packet not completed yet
2014 * -EBUSY - completion ring does not have all the agg buffers yet
2015 * -ENOMEM - packet aborted due to out of memory
2016 * -EIO - packet aborted due to hw error indicated in BD
2017 */
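/* A condensed sketch of how __bnxt_poll_work() below consumes these
 * return codes (see that function for the authoritative handling):
 */
#if 0
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
if (rc >= 0)
	rx_pkts += rc;          /* 1 packet received, or 0 for TPA_START */
else if (rc == -ENOMEM && budget)
	rx_pkts++;              /* count OOM drops against the budget */
else if (rc == -EBUSY)
	;                       /* partial completion, retry next poll */
#endif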
2018 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2019 u32 *raw_cons, u8 *event)
2020 {
2021 struct bnxt_napi *bnapi = cpr->bnapi;
2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2023 struct net_device *dev = bp->dev;
2024 struct rx_cmp *rxcmp;
2025 struct rx_cmp_ext *rxcmp1;
2026 u32 tmp_raw_cons = *raw_cons;
2027 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2028 struct bnxt_sw_rx_bd *rx_buf;
2029 unsigned int len;
2030 u8 *data_ptr, agg_bufs, cmp_type;
2031 bool xdp_active = false;
2032 dma_addr_t dma_addr;
2033 struct sk_buff *skb;
2034 struct xdp_buff xdp;
2035 u32 flags, misc;
2036 u32 cmpl_ts;
2037 void *data;
2038 int rc = 0;
2039
2040 rxcmp = (struct rx_cmp *)
2041 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2042
2043 cmp_type = RX_CMP_TYPE(rxcmp);
2044
2045 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2046 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2047 goto next_rx_no_prod_no_len;
2048 }
2049
2050 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2051 cp_cons = RING_CMP(tmp_raw_cons);
2052 rxcmp1 = (struct rx_cmp_ext *)
2053 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2054
2055 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2056 return -EBUSY;
2057
2058 /* The valid test of the entry must be done first before
2059 * reading any further.
2060 */
2061 dma_rmb();
2062 prod = rxr->rx_prod;
2063
2064 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2065 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2066 bnxt_tpa_start(bp, rxr, cmp_type,
2067 (struct rx_tpa_start_cmp *)rxcmp,
2068 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2069
2070 *event |= BNXT_RX_EVENT;
2071 goto next_rx_no_prod_no_len;
2072
2073 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2074 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2075 (struct rx_tpa_end_cmp *)rxcmp,
2076 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2077
2078 if (IS_ERR(skb))
2079 return -EBUSY;
2080
2081 rc = -ENOMEM;
2082 if (likely(skb)) {
2083 bnxt_deliver_skb(bp, bnapi, skb);
2084 rc = 1;
2085 }
2086 *event |= BNXT_RX_EVENT;
2087 goto next_rx_no_prod_no_len;
2088 }
2089
2090 cons = rxcmp->rx_cmp_opaque;
2091 if (unlikely(cons != rxr->rx_next_cons)) {
2092 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2093
2094 /* 0xffff is forced error, don't print it */
2095 if (rxr->rx_next_cons != 0xffff)
2096 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2097 cons, rxr->rx_next_cons);
2098 bnxt_sched_reset_rxr(bp, rxr);
2099 if (rc1)
2100 return rc1;
2101 goto next_rx_no_prod_no_len;
2102 }
2103 rx_buf = &rxr->rx_buf_ring[cons];
2104 data = rx_buf->data;
2105 data_ptr = rx_buf->data_ptr;
2106 prefetch(data_ptr);
2107
2108 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2109 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2110
2111 if (agg_bufs) {
2112 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2113 return -EBUSY;
2114
2115 cp_cons = NEXT_CMP(cp_cons);
2116 *event |= BNXT_AGG_EVENT;
2117 }
2118 *event |= BNXT_RX_EVENT;
2119
2120 rx_buf->data = NULL;
2121 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2122 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2123
2124 bnxt_reuse_rx_data(rxr, cons, data);
2125 if (agg_bufs)
2126 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2127 false);
2128
2129 rc = -EIO;
2130 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2131 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2132 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2133 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2134 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2135 rx_err);
2136 bnxt_sched_reset_rxr(bp, rxr);
2137 }
2138 }
2139 goto next_rx_no_len;
2140 }
2141
2142 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2143 len = flags >> RX_CMP_LEN_SHIFT;
2144 dma_addr = rx_buf->mapping;
2145
2146 if (bnxt_xdp_attached(bp, rxr)) {
2147 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2148 if (agg_bufs) {
2149 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2150 cp_cons, agg_bufs,
2151 false);
2152 if (!frag_len)
2153 goto oom_next_rx;
2154 }
2155 xdp_active = true;
2156 }
2157
2158 if (xdp_active) {
2159 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2160 rc = 1;
2161 goto next_rx;
2162 }
2163 }
2164
2165 if (len <= bp->rx_copy_thresh) {
2166 if (!xdp_active)
2167 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2168 else
2169 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2170 bnxt_reuse_rx_data(rxr, cons, data);
2171 if (!skb) {
2172 if (agg_bufs) {
2173 if (!xdp_active)
2174 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2175 agg_bufs, false);
2176 else
2177 bnxt_xdp_buff_frags_free(rxr, &xdp);
2178 }
2179 goto oom_next_rx;
2180 }
2181 } else {
2182 u32 payload;
2183
2184 if (rx_buf->data_ptr == data_ptr)
2185 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2186 else
2187 payload = 0;
2188 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2189 payload | len);
2190 if (!skb)
2191 goto oom_next_rx;
2192 }
2193
2194 if (agg_bufs) {
2195 if (!xdp_active) {
2196 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2197 if (!skb)
2198 goto oom_next_rx;
2199 } else {
2200 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2201 if (!skb) {
2202 /* we should be able to free the old skb here */
2203 bnxt_xdp_buff_frags_free(rxr, &xdp);
2204 goto oom_next_rx;
2205 }
2206 }
2207 }
2208
2209 if (RX_CMP_HASH_VALID(rxcmp)) {
2210 enum pkt_hash_types type;
2211
2212 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2213 type = bnxt_rss_ext_op(bp, rxcmp);
2214 } else {
2215 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2216
2217 /* RSS profiles 1 and 3 with extract code 0 for inner
2218 * 4-tuple
2219 */
2220 if (hash_type != 1 && hash_type != 3)
2221 type = PKT_HASH_TYPE_L3;
2222 else
2223 type = PKT_HASH_TYPE_L4;
2224 }
2225 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2226 }
2227
2228 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2229 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2230 skb->protocol = eth_type_trans(skb, dev);
2231
2232 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2233 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2234 if (!skb)
2235 goto next_rx;
2236 }
2237
2238 skb_checksum_none_assert(skb);
2239 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2240 if (dev->features & NETIF_F_RXCSUM) {
2241 skb->ip_summed = CHECKSUM_UNNECESSARY;
2242 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2243 }
2244 } else {
2245 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2246 if (dev->features & NETIF_F_RXCSUM)
2247 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2248 }
2249 }
2250
2251 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2252 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2253 u64 ns, ts;
2254
2255 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2256 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2257
2258 spin_lock_bh(&ptp->ptp_lock);
2259 ns = timecounter_cyc2time(&ptp->tc, ts);
2260 spin_unlock_bh(&ptp->ptp_lock);
2261 memset(skb_hwtstamps(skb), 0,
2262 sizeof(*skb_hwtstamps(skb)));
2263 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2264 }
2265 }
2266 }
2267 bnxt_deliver_skb(bp, bnapi, skb);
2268 rc = 1;
2269
2270 next_rx:
2271 cpr->rx_packets += 1;
2272 cpr->rx_bytes += len;
2273
2274 next_rx_no_len:
2275 rxr->rx_prod = NEXT_RX(prod);
2276 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2277
2278 next_rx_no_prod_no_len:
2279 *raw_cons = tmp_raw_cons;
2280
2281 return rc;
2282
2283 oom_next_rx:
2284 cpr->sw_stats->rx.rx_oom_discards += 1;
2285 rc = -ENOMEM;
2286 goto next_rx;
2287 }
2288
2289 /* In netpoll mode, if we are using a combined completion ring, we need to
2290 * discard the rx packets and recycle the buffers.
2291 */
2292 static int bnxt_force_rx_discard(struct bnxt *bp,
2293 struct bnxt_cp_ring_info *cpr,
2294 u32 *raw_cons, u8 *event)
2295 {
2296 u32 tmp_raw_cons = *raw_cons;
2297 struct rx_cmp_ext *rxcmp1;
2298 struct rx_cmp *rxcmp;
2299 u16 cp_cons;
2300 u8 cmp_type;
2301 int rc;
2302
2303 cp_cons = RING_CMP(tmp_raw_cons);
2304 rxcmp = (struct rx_cmp *)
2305 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2306
2307 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2308 cp_cons = RING_CMP(tmp_raw_cons);
2309 rxcmp1 = (struct rx_cmp_ext *)
2310 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2311
2312 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2313 return -EBUSY;
2314
2315 /* The valid test of the entry must be done first before
2316 * reading any further.
2317 */
2318 dma_rmb();
2319 cmp_type = RX_CMP_TYPE(rxcmp);
2320 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2321 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2322 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2323 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2324 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2325 struct rx_tpa_end_cmp_ext *tpa_end1;
2326
2327 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2328 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2329 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2330 }
2331 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2332 if (rc && rc != -EBUSY)
2333 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2334 return rc;
2335 }
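/* The forced CRC error above steers bnxt_rx_pkt() into its L2-error
 * path (the RX_CMP_L2_ERRORS check), which reuses the rx and agg
 * buffers instead of building an skb; the TPA_END case is forced into
 * bnxt_tpa_end()'s error handling the same way.
 */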
2336
2337 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2338 {
2339 struct bnxt_fw_health *fw_health = bp->fw_health;
2340 u32 reg = fw_health->regs[reg_idx];
2341 u32 reg_type, reg_off, val = 0;
2342
2343 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2344 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2345 switch (reg_type) {
2346 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2347 pci_read_config_dword(bp->pdev, reg_off, &val);
2348 break;
2349 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2350 reg_off = fw_health->mapped_regs[reg_idx];
2351 fallthrough;
2352 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2353 val = readl(bp->bar0 + reg_off);
2354 break;
2355 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2356 val = readl(bp->bar1 + reg_off);
2357 break;
2358 }
2359 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2360 val &= fw_health->fw_reset_inprog_reg_mask;
2361 return val;
2362 }
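/* A minimal usage sketch, mirroring the recovery handling later in
 * this file: the index selects a pre-mapped health register, and the
 * helper hides whether it lives in config space, GRC, or a BAR.
 */
#if 0
u32 hb = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
u32 resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
#endif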
2363
2364 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2365 {
2366 int i;
2367
2368 for (i = 0; i < bp->rx_nr_rings; i++) {
2369 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2370 struct bnxt_ring_grp_info *grp_info;
2371
2372 grp_info = &bp->grp_info[grp_idx];
2373 if (grp_info->agg_fw_ring_id == ring_id)
2374 return grp_idx;
2375 }
2376 return INVALID_HW_RING_ID;
2377 }
2378
2379 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2380 {
2381 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2382
2383 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2384 return link_info->force_link_speed2;
2385 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2386 return link_info->force_pam4_link_speed;
2387 return link_info->force_link_speed;
2388 }
2389
2390 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2391 {
2392 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2393
2394 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2395 link_info->req_link_speed = link_info->force_link_speed2;
2396 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2397 switch (link_info->req_link_speed) {
2398 case BNXT_LINK_SPEED_50GB_PAM4:
2399 case BNXT_LINK_SPEED_100GB_PAM4:
2400 case BNXT_LINK_SPEED_200GB_PAM4:
2401 case BNXT_LINK_SPEED_400GB_PAM4:
2402 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2403 break;
2404 case BNXT_LINK_SPEED_100GB_PAM4_112:
2405 case BNXT_LINK_SPEED_200GB_PAM4_112:
2406 case BNXT_LINK_SPEED_400GB_PAM4_112:
2407 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2408 break;
2409 default:
2410 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2411 }
2412 return;
2413 }
2414 link_info->req_link_speed = link_info->force_link_speed;
2415 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2416 if (link_info->force_pam4_link_speed) {
2417 link_info->req_link_speed = link_info->force_pam4_link_speed;
2418 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2419 }
2420 }
2421
2422 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2423 {
2424 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2425
2426 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2427 link_info->advertising = link_info->auto_link_speeds2;
2428 return;
2429 }
2430 link_info->advertising = link_info->auto_link_speeds;
2431 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2432 }
2433
2434 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2435 {
2436 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2437
2438 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2439 if (link_info->req_link_speed != link_info->force_link_speed2)
2440 return true;
2441 return false;
2442 }
2443 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2444 link_info->req_link_speed != link_info->force_link_speed)
2445 return true;
2446 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2447 link_info->req_link_speed != link_info->force_pam4_link_speed)
2448 return true;
2449 return false;
2450 }
2451
2452 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2453 {
2454 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2455
2456 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2457 if (link_info->advertising != link_info->auto_link_speeds2)
2458 return true;
2459 return false;
2460 }
2461 if (link_info->advertising != link_info->auto_link_speeds ||
2462 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2463 return true;
2464 return false;
2465 }
2466
2467 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2468 ((data2) & \
2469 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2470
2471 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2472 (((data2) & \
2473 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2474 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2475
2476 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2477 ((data1) & \
2478 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2479
2480 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2481 (((data1) & \
2482 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2483 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2484
2485 /* Return true if the workqueue has to be scheduled */
2486 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2487 {
2488 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2489
2490 switch (err_type) {
2491 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2492 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2493 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2494 break;
2495 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2496 netdev_warn(bp->dev, "Pause Storm detected!\n");
2497 break;
2498 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2499 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2500 break;
2501 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2502 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2503 char *threshold_type;
2504 bool notify = false;
2505 char *dir_str;
2506
2507 switch (type) {
2508 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2509 threshold_type = "warning";
2510 break;
2511 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2512 threshold_type = "critical";
2513 break;
2514 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2515 threshold_type = "fatal";
2516 break;
2517 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2518 threshold_type = "shutdown";
2519 break;
2520 default:
2521 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2522 return false;
2523 }
2524 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2525 dir_str = "above";
2526 notify = true;
2527 } else {
2528 dir_str = "below";
2529 }
2530 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2531 dir_str, threshold_type);
2532 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2533 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2534 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2535 if (notify) {
2536 bp->thermal_threshold_type = type;
2537 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2538 return true;
2539 }
2540 return false;
2541 }
2542 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2543 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2544 break;
2545 default:
2546 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2547 err_type);
2548 break;
2549 }
2550 return false;
2551 }
2552
2553 #define BNXT_GET_EVENT_PORT(data) \
2554 ((data) & \
2555 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2556
2557 #define BNXT_EVENT_RING_TYPE(data2) \
2558 ((data2) & \
2559 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2560
2561 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2562 (BNXT_EVENT_RING_TYPE(data2) == \
2563 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2564
2565 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2566 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2567 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2568
2569 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2570 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2571 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2572
2573 #define BNXT_PHC_BITS 48
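/* A minimal sketch of the 48-bit split used by the PHC_UPDATE handler
 * below: the async event carries the time bits above bit 47, which are
 * recombined with the locally tracked low 48 bits.
 */
#if 0
u64 ns = ((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << BNXT_PHC_BITS) |
	 ptp->current_time;
#endif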
2574
2575 static int bnxt_async_event_process(struct bnxt *bp,
2576 struct hwrm_async_event_cmpl *cmpl)
2577 {
2578 u16 event_id = le16_to_cpu(cmpl->event_id);
2579 u32 data1 = le32_to_cpu(cmpl->event_data1);
2580 u32 data2 = le32_to_cpu(cmpl->event_data2);
2581
2582 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2583 event_id, data1, data2);
2584
2585 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2586 switch (event_id) {
2587 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2588 struct bnxt_link_info *link_info = &bp->link_info;
2589
2590 if (BNXT_VF(bp))
2591 goto async_event_process_exit;
2592
2593 /* print unsupported speed warning in forced speed mode only */
2594 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2595 (data1 & 0x20000)) {
2596 u16 fw_speed = bnxt_get_force_speed(link_info);
2597 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2598
2599 if (speed != SPEED_UNKNOWN)
2600 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2601 speed);
2602 }
2603 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2604 }
2605 fallthrough;
2606 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2607 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2608 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2609 fallthrough;
2610 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2611 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2612 break;
2613 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2614 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2615 break;
2616 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2617 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2618
2619 if (BNXT_VF(bp))
2620 break;
2621
2622 if (bp->pf.port_id != port_id)
2623 break;
2624
2625 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2626 break;
2627 }
2628 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2629 if (BNXT_PF(bp))
2630 goto async_event_process_exit;
2631 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2632 break;
2633 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2634 char *type_str = "Solicited";
2635
2636 if (!bp->fw_health)
2637 goto async_event_process_exit;
2638
2639 bp->fw_reset_timestamp = jiffies;
2640 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2641 if (!bp->fw_reset_min_dsecs)
2642 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2643 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2644 if (!bp->fw_reset_max_dsecs)
2645 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2646 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2647 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2648 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2649 type_str = "Fatal";
2650 bp->fw_health->fatalities++;
2651 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2652 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2653 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2654 type_str = "Non-fatal";
2655 bp->fw_health->survivals++;
2656 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2657 }
2658 netif_warn(bp, hw, bp->dev,
2659 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2660 type_str, data1, data2,
2661 bp->fw_reset_min_dsecs * 100,
2662 bp->fw_reset_max_dsecs * 100);
2663 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2664 break;
2665 }
2666 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2667 struct bnxt_fw_health *fw_health = bp->fw_health;
2668 char *status_desc = "healthy";
2669 u32 status;
2670
2671 if (!fw_health)
2672 goto async_event_process_exit;
2673
2674 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2675 fw_health->enabled = false;
2676 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2677 break;
2678 }
2679 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2680 fw_health->tmr_multiplier =
2681 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2682 bp->current_interval * 10);
2683 fw_health->tmr_counter = fw_health->tmr_multiplier;
2684 if (!fw_health->enabled)
2685 fw_health->last_fw_heartbeat =
2686 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2687 fw_health->last_fw_reset_cnt =
2688 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2689 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2690 if (status != BNXT_FW_STATUS_HEALTHY)
2691 status_desc = "unhealthy";
2692 netif_info(bp, drv, bp->dev,
2693 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2694 fw_health->primary ? "primary" : "backup", status,
2695 status_desc, fw_health->last_fw_reset_cnt);
2696 if (!fw_health->enabled) {
2697 /* Make sure tmr_counter is set and visible to
2698 * bnxt_health_check() before setting enabled to true.
2699 */
2700 smp_wmb();
2701 fw_health->enabled = true;
2702 }
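/* Reader-side pairing sketch (illustrative only, not the actual
 * bnxt_health_check() body): the reader must order its tmr_counter
 * load after seeing enabled == true for the smp_wmb() above to help.
 */
#if 0
if (READ_ONCE(fw_health->enabled)) {
	smp_rmb();              /* pairs with the smp_wmb() above */
	counter = fw_health->tmr_counter;
}
#endif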
2703 goto async_event_process_exit;
2704 }
2705 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2706 netif_notice(bp, hw, bp->dev,
2707 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2708 data1, data2);
2709 goto async_event_process_exit;
2710 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2711 struct bnxt_rx_ring_info *rxr;
2712 u16 grp_idx;
2713
2714 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2715 goto async_event_process_exit;
2716
2717 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2718 BNXT_EVENT_RING_TYPE(data2), data1);
2719 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2720 goto async_event_process_exit;
2721
2722 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2723 if (grp_idx == INVALID_HW_RING_ID) {
2724 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2725 data1);
2726 goto async_event_process_exit;
2727 }
2728 rxr = bp->bnapi[grp_idx]->rx_ring;
2729 bnxt_sched_reset_rxr(bp, rxr);
2730 goto async_event_process_exit;
2731 }
2732 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2733 struct bnxt_fw_health *fw_health = bp->fw_health;
2734
2735 netif_notice(bp, hw, bp->dev,
2736 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2737 data1, data2);
2738 if (fw_health) {
2739 fw_health->echo_req_data1 = data1;
2740 fw_health->echo_req_data2 = data2;
2741 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2742 break;
2743 }
2744 goto async_event_process_exit;
2745 }
2746 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2747 bnxt_ptp_pps_event(bp, data1, data2);
2748 goto async_event_process_exit;
2749 }
2750 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2751 if (bnxt_event_error_report(bp, data1, data2))
2752 break;
2753 goto async_event_process_exit;
2754 }
2755 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2756 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2757 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2758 if (BNXT_PTP_USE_RTC(bp)) {
2759 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2760 u64 ns;
2761
2762 if (!ptp)
2763 goto async_event_process_exit;
2764
2765 spin_lock_bh(&ptp->ptp_lock);
2766 bnxt_ptp_update_current_time(bp);
2767 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2768 BNXT_PHC_BITS) | ptp->current_time);
2769 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2770 spin_unlock_bh(&ptp->ptp_lock);
2771 }
2772 break;
2773 }
2774 goto async_event_process_exit;
2775 }
2776 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2777 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2778
2779 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2780 goto async_event_process_exit;
2781 }
2782 default:
2783 goto async_event_process_exit;
2784 }
2785 __bnxt_queue_sp_work(bp);
2786 async_event_process_exit:
2787 return 0;
2788 }
2789
2790 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2791 {
2792 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2793 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2794 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2795 (struct hwrm_fwd_req_cmpl *)txcmp;
2796
2797 switch (cmpl_type) {
2798 case CMPL_BASE_TYPE_HWRM_DONE:
2799 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2800 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2801 break;
2802
2803 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2804 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2805
2806 if ((vf_id < bp->pf.first_vf_id) ||
2807 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2808 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2809 vf_id);
2810 return -EINVAL;
2811 }
2812
2813 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2814 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2815 break;
2816
2817 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2818 bnxt_async_event_process(bp,
2819 (struct hwrm_async_event_cmpl *)txcmp);
2820 break;
2821
2822 default:
2823 break;
2824 }
2825
2826 return 0;
2827 }
2828
2829 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2830 {
2831 struct bnxt_napi *bnapi = dev_instance;
2832 struct bnxt *bp = bnapi->bp;
2833 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2834 u32 cons = RING_CMP(cpr->cp_raw_cons);
2835
2836 cpr->event_ctr++;
2837 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2838 napi_schedule(&bnapi->napi);
2839 return IRQ_HANDLED;
2840 }
2841
2842 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2843 {
2844 u32 raw_cons = cpr->cp_raw_cons;
2845 u16 cons = RING_CMP(raw_cons);
2846 struct tx_cmp *txcmp;
2847
2848 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2849
2850 return TX_CMP_VALID(txcmp, raw_cons);
2851 }
2852
2853 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2854 int budget)
2855 {
2856 struct bnxt_napi *bnapi = cpr->bnapi;
2857 u32 raw_cons = cpr->cp_raw_cons;
2858 u32 cons;
2859 int rx_pkts = 0;
2860 u8 event = 0;
2861 struct tx_cmp *txcmp;
2862
2863 cpr->has_more_work = 0;
2864 cpr->had_work_done = 1;
2865 while (1) {
2866 u8 cmp_type;
2867 int rc;
2868
2869 cons = RING_CMP(raw_cons);
2870 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2871
2872 if (!TX_CMP_VALID(txcmp, raw_cons))
2873 break;
2874
2875 /* The valid test of the entry must be done first before
2876 * reading any further.
2877 */
2878 dma_rmb();
2879 cmp_type = TX_CMP_TYPE(txcmp);
2880 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2881 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2882 u32 opaque = txcmp->tx_cmp_opaque;
2883 struct bnxt_tx_ring_info *txr;
2884 u16 tx_freed;
2885
2886 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2887 event |= BNXT_TX_CMP_EVENT;
2888 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2889 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2890 else
2891 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2892 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2893 bp->tx_ring_mask;
2894 /* return full budget so NAPI will complete. */
2895 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2896 rx_pkts = budget;
2897 raw_cons = NEXT_RAW_CMP(raw_cons);
2898 if (budget)
2899 cpr->has_more_work = 1;
2900 break;
2901 }
2902 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
2903 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
2904 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2905 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2906 if (likely(budget))
2907 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2908 else
2909 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2910 &event);
2911 if (likely(rc >= 0))
2912 rx_pkts += rc;
2913 /* Increment rx_pkts when rc is -ENOMEM to count towards
2914 * the NAPI budget. Otherwise, we may potentially loop
2915 * here forever if we consistently cannot allocate
2916 * buffers.
2917 */
2918 else if (rc == -ENOMEM && budget)
2919 rx_pkts++;
2920 else if (rc == -EBUSY) /* partial completion */
2921 break;
2922 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
2923 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
2924 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
2925 bnxt_hwrm_handler(bp, txcmp);
2926 }
2927 raw_cons = NEXT_RAW_CMP(raw_cons);
2928
2929 if (rx_pkts && rx_pkts == budget) {
2930 cpr->has_more_work = 1;
2931 break;
2932 }
2933 }
2934
2935 if (event & BNXT_REDIRECT_EVENT) {
2936 xdp_do_flush();
2937 event &= ~BNXT_REDIRECT_EVENT;
2938 }
2939
2940 if (event & BNXT_TX_EVENT) {
2941 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
2942 u16 prod = txr->tx_prod;
2943
2944 /* Sync BD data before updating doorbell */
2945 wmb();
2946
2947 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2948 event &= ~BNXT_TX_EVENT;
2949 }
2950
2951 cpr->cp_raw_cons = raw_cons;
2952 bnapi->events |= event;
2953 return rx_pkts;
2954 }
2955
2956 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2957 int budget)
2958 {
2959 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
2960 bnapi->tx_int(bp, bnapi, budget);
2961
2962 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2963 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2964
2965 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2966 bnapi->events &= ~BNXT_RX_EVENT;
2967 }
2968 if (bnapi->events & BNXT_AGG_EVENT) {
2969 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2970
2971 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2972 bnapi->events &= ~BNXT_AGG_EVENT;
2973 }
2974 }
2975
2976 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2977 int budget)
2978 {
2979 struct bnxt_napi *bnapi = cpr->bnapi;
2980 int rx_pkts;
2981
2982 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2983
2984 /* ACK the completion ring before freeing tx buffers and producing new
2985  * buffers in the rx/agg rings; both can generate further completions,
2986  * so ACKing first prevents the completion ring from overflowing.
2987  */
2988 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2989
2990 __bnxt_poll_work_done(bp, bnapi, budget);
2991 return rx_pkts;
2992 }
2993
2994 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2995 {
2996 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2997 struct bnxt *bp = bnapi->bp;
2998 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2999 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3000 struct tx_cmp *txcmp;
3001 struct rx_cmp_ext *rxcmp1;
3002 u32 cp_cons, tmp_raw_cons;
3003 u32 raw_cons = cpr->cp_raw_cons;
3004 bool flush_xdp = false;
3005 u32 rx_pkts = 0;
3006 u8 event = 0;
3007
3008 while (1) {
3009 int rc;
3010
3011 cp_cons = RING_CMP(raw_cons);
3012 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3013
3014 if (!TX_CMP_VALID(txcmp, raw_cons))
3015 break;
3016
3017 /* The valid test of the entry must be done first before
3018 * reading any further.
3019 */
3020 dma_rmb();
3021 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3022 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3023 cp_cons = RING_CMP(tmp_raw_cons);
3024 rxcmp1 = (struct rx_cmp_ext *)
3025 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3026
3027 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3028 break;
3029
3030 /* force an error to recycle the buffer */
3031 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3032 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3033
3034 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3035 if (likely(rc == -EIO) && budget)
3036 rx_pkts++;
3037 else if (rc == -EBUSY) /* partial completion */
3038 break;
3039 if (event & BNXT_REDIRECT_EVENT)
3040 flush_xdp = true;
3041 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3042 CMPL_BASE_TYPE_HWRM_DONE)) {
3043 bnxt_hwrm_handler(bp, txcmp);
3044 } else {
3045 netdev_err(bp->dev,
3046 "Invalid completion received on special ring\n");
3047 }
3048 raw_cons = NEXT_RAW_CMP(raw_cons);
3049
3050 if (rx_pkts == budget)
3051 break;
3052 }
3053
3054 cpr->cp_raw_cons = raw_cons;
3055 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3056 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3057
3058 if (event & BNXT_AGG_EVENT)
3059 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3060 if (flush_xdp)
3061 xdp_do_flush();
3062
3063 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3064 napi_complete_done(napi, rx_pkts);
3065 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3066 }
3067 return rx_pkts;
3068 }
3069
3070 static int bnxt_poll(struct napi_struct *napi, int budget)
3071 {
3072 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3073 struct bnxt *bp = bnapi->bp;
3074 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3075 int work_done = 0;
3076
3077 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3078 napi_complete(napi);
3079 return 0;
3080 }
3081 while (1) {
3082 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3083
3084 if (work_done >= budget) {
3085 if (!budget)
3086 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3087 break;
3088 }
3089
3090 if (!bnxt_has_work(bp, cpr)) {
3091 if (napi_complete_done(napi, work_done))
3092 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3093 break;
3094 }
3095 }
3096 if (bp->flags & BNXT_FLAG_DIM) {
3097 struct dim_sample dim_sample = {};
3098
3099 dim_update_sample(cpr->event_ctr,
3100 cpr->rx_packets,
3101 cpr->rx_bytes,
3102 &dim_sample);
3103 net_dim(&cpr->dim, dim_sample);
3104 }
3105 return work_done;
3106 }
3107
3108 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3109 {
3110 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3111 int i, work_done = 0;
3112
3113 for (i = 0; i < cpr->cp_ring_count; i++) {
3114 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3115
3116 if (cpr2->had_nqe_notify) {
3117 work_done += __bnxt_poll_work(bp, cpr2,
3118 budget - work_done);
3119 cpr->has_more_work |= cpr2->has_more_work;
3120 }
3121 }
3122 return work_done;
3123 }
3124
3125 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3126 u64 dbr_type, int budget)
3127 {
3128 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3129 int i;
3130
3131 for (i = 0; i < cpr->cp_ring_count; i++) {
3132 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3133 struct bnxt_db_info *db;
3134
3135 if (cpr2->had_work_done) {
3136 u32 tgl = 0;
3137
3138 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3139 cpr2->had_nqe_notify = 0;
3140 tgl = cpr2->toggle;
3141 }
3142 db = &cpr2->cp_db;
3143 bnxt_writeq(bp,
3144 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3145 DB_RING_IDX(db, cpr2->cp_raw_cons),
3146 db->doorbell);
3147 cpr2->had_work_done = 0;
3148 }
3149 }
3150 __bnxt_poll_work_done(bp, bnapi, budget);
3151 }
3152
3153 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3154 {
3155 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3156 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3157 struct bnxt_cp_ring_info *cpr_rx;
3158 u32 raw_cons = cpr->cp_raw_cons;
3159 struct bnxt *bp = bnapi->bp;
3160 struct nqe_cn *nqcmp;
3161 int work_done = 0;
3162 u32 cons;
3163
3164 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3165 napi_complete(napi);
3166 return 0;
3167 }
3168 if (cpr->has_more_work) {
3169 cpr->has_more_work = 0;
3170 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3171 }
3172 while (1) {
3173 u16 type;
3174
3175 cons = RING_CMP(raw_cons);
3176 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3177
3178 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3179 if (cpr->has_more_work)
3180 break;
3181
3182 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3183 budget);
3184 cpr->cp_raw_cons = raw_cons;
3185 if (napi_complete_done(napi, work_done))
3186 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3187 cpr->cp_raw_cons);
3188 goto poll_done;
3189 }
3190
3191 /* The valid test of the entry must be done first before
3192 * reading any further.
3193 */
3194 dma_rmb();
3195
3196 type = le16_to_cpu(nqcmp->type);
3197 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3198 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3199 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3200 struct bnxt_cp_ring_info *cpr2;
3201
3202 /* No more budget for RX work */
3203 if (budget && work_done >= budget &&
3204 cq_type == BNXT_NQ_HDL_TYPE_RX)
3205 break;
3206
3207 idx = BNXT_NQ_HDL_IDX(idx);
3208 cpr2 = &cpr->cp_ring_arr[idx];
3209 cpr2->had_nqe_notify = 1;
3210 cpr2->toggle = NQE_CN_TOGGLE(type);
3211 work_done += __bnxt_poll_work(bp, cpr2,
3212 budget - work_done);
3213 cpr->has_more_work |= cpr2->has_more_work;
3214 } else {
3215 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3216 }
3217 raw_cons = NEXT_RAW_CMP(raw_cons);
3218 }
3219 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3220 if (raw_cons != cpr->cp_raw_cons) {
3221 cpr->cp_raw_cons = raw_cons;
3222 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3223 }
3224 poll_done:
3225 cpr_rx = &cpr->cp_ring_arr[0];
3226 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3227 (bp->flags & BNXT_FLAG_DIM)) {
3228 struct dim_sample dim_sample = {};
3229
3230 dim_update_sample(cpr->event_ctr,
3231 cpr_rx->rx_packets,
3232 cpr_rx->rx_bytes,
3233 &dim_sample);
3234 net_dim(&cpr->dim, dim_sample);
3235 }
3236 return work_done;
3237 }
3238
3239 static void bnxt_free_tx_skbs(struct bnxt *bp)
3240 {
3241 int i, max_idx;
3242 struct pci_dev *pdev = bp->pdev;
3243
3244 if (!bp->tx_ring)
3245 return;
3246
3247 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3248 for (i = 0; i < bp->tx_nr_rings; i++) {
3249 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3250 int j;
3251
3252 if (!txr->tx_buf_ring)
3253 continue;
3254
3255 for (j = 0; j < max_idx;) {
3256 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3257 struct sk_buff *skb;
3258 int k, last;
3259
3260 if (i < bp->tx_nr_rings_xdp &&
3261 tx_buf->action == XDP_REDIRECT) {
3262 dma_unmap_single(&pdev->dev,
3263 dma_unmap_addr(tx_buf, mapping),
3264 dma_unmap_len(tx_buf, len),
3265 DMA_TO_DEVICE);
3266 xdp_return_frame(tx_buf->xdpf);
3267 tx_buf->action = 0;
3268 tx_buf->xdpf = NULL;
3269 j++;
3270 continue;
3271 }
3272
3273 skb = tx_buf->skb;
3274 if (!skb) {
3275 j++;
3276 continue;
3277 }
3278
3279 tx_buf->skb = NULL;
3280
3281 if (tx_buf->is_push) {
3282 dev_kfree_skb(skb);
3283 j += 2;
3284 continue;
3285 }
3286
3287 dma_unmap_single(&pdev->dev,
3288 dma_unmap_addr(tx_buf, mapping),
3289 skb_headlen(skb),
3290 DMA_TO_DEVICE);
3291
3292 last = tx_buf->nr_frags;
3293 j += 2;
3294 for (k = 0; k < last; k++, j++) {
3295 int ring_idx = j & bp->tx_ring_mask;
3296 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3297
3298 tx_buf = &txr->tx_buf_ring[ring_idx];
3299 dma_unmap_page(
3300 &pdev->dev,
3301 dma_unmap_addr(tx_buf, mapping),
3302 skb_frag_size(frag), DMA_TO_DEVICE);
3303 }
3304 dev_kfree_skb(skb);
3305 }
3306 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3307 }
3308 }
3309
3310 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3311 {
3312 struct pci_dev *pdev = bp->pdev;
3313 int i, max_idx;
3314
3315 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3316
3317 for (i = 0; i < max_idx; i++) {
3318 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3319 dma_addr_t mapping = rx_buf->mapping;
3320 void *data = rx_buf->data;
3321
3322 if (!data)
3323 continue;
3324
3325 rx_buf->data = NULL;
3326 if (BNXT_RX_PAGE_MODE(bp)) {
3327 page_pool_recycle_direct(rxr->page_pool, data);
3328 } else {
3329 dma_unmap_single_attrs(&pdev->dev, mapping,
3330 bp->rx_buf_use_size, bp->rx_dir,
3331 DMA_ATTR_WEAK_ORDERING);
3332 skb_free_frag(data);
3333 }
3334 }
3335 }
3336
3337 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3338 {
3339 int i, max_idx;
3340
3341 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3342
3343 for (i = 0; i < max_idx; i++) {
3344 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3345 struct page *page = rx_agg_buf->page;
3346
3347 if (!page)
3348 continue;
3349
3350 rx_agg_buf->page = NULL;
3351 __clear_bit(i, rxr->rx_agg_bmap);
3352
3353 page_pool_recycle_direct(rxr->page_pool, page);
3354 }
3355 }
3356
3357 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3358 {
3359 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3360 struct pci_dev *pdev = bp->pdev;
3361 struct bnxt_tpa_idx_map *map;
3362 int i;
3363
3364 if (!rxr->rx_tpa)
3365 goto skip_rx_tpa_free;
3366
3367 for (i = 0; i < bp->max_tpa; i++) {
3368 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3369 u8 *data = tpa_info->data;
3370
3371 if (!data)
3372 continue;
3373
3374 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
3375 bp->rx_buf_use_size, bp->rx_dir,
3376 DMA_ATTR_WEAK_ORDERING);
3377
3378 tpa_info->data = NULL;
3379
3380 skb_free_frag(data);
3381 }
3382
3383 skip_rx_tpa_free:
3384 if (!rxr->rx_buf_ring)
3385 goto skip_rx_buf_free;
3386
3387 bnxt_free_one_rx_ring(bp, rxr);
3388
3389 skip_rx_buf_free:
3390 if (!rxr->rx_agg_ring)
3391 goto skip_rx_agg_free;
3392
3393 bnxt_free_one_rx_agg_ring(bp, rxr);
3394
3395 skip_rx_agg_free:
3396 map = rxr->rx_tpa_idx_map;
3397 if (map)
3398 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3399 }
3400
3401 static void bnxt_free_rx_skbs(struct bnxt *bp)
3402 {
3403 int i;
3404
3405 if (!bp->rx_ring)
3406 return;
3407
3408 for (i = 0; i < bp->rx_nr_rings; i++)
3409 bnxt_free_one_rx_ring_skbs(bp, i);
3410 }
3411
3412 static void bnxt_free_skbs(struct bnxt *bp)
3413 {
3414 bnxt_free_tx_skbs(bp);
3415 bnxt_free_rx_skbs(bp);
3416 }
3417
3418 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3419 {
3420 u8 init_val = ctxm->init_value;
3421 u16 offset = ctxm->init_offset;
3422 u8 *p2 = p;
3423 int i;
3424
3425 if (!init_val)
3426 return;
3427 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3428 memset(p, init_val, len);
3429 return;
3430 }
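/* Otherwise write init_val only at init_offset within each entry */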
3431 for (i = 0; i < len; i += ctxm->entry_size)
3432 *(p2 + i + offset) = init_val;
3433 }
3434
3435 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3436 {
3437 struct pci_dev *pdev = bp->pdev;
3438 int i;
3439
3440 if (!rmem->pg_arr)
3441 goto skip_pages;
3442
3443 for (i = 0; i < rmem->nr_pages; i++) {
3444 if (!rmem->pg_arr[i])
3445 continue;
3446
3447 dma_free_coherent(&pdev->dev, rmem->page_size,
3448 rmem->pg_arr[i], rmem->dma_arr[i]);
3449
3450 rmem->pg_arr[i] = NULL;
3451 }
3452 skip_pages:
3453 if (rmem->pg_tbl) {
3454 size_t pg_tbl_size = rmem->nr_pages * 8;
3455
3456 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3457 pg_tbl_size = rmem->page_size;
3458 dma_free_coherent(&pdev->dev, pg_tbl_size,
3459 rmem->pg_tbl, rmem->pg_tbl_map);
3460 rmem->pg_tbl = NULL;
3461 }
3462 if (rmem->vmem_size && *rmem->vmem) {
3463 vfree(*rmem->vmem);
3464 *rmem->vmem = NULL;
3465 }
3466 }
3467
3468 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3469 {
3470 struct pci_dev *pdev = bp->pdev;
3471 u64 valid_bit = 0;
3472 int i;
3473
3474 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3475 valid_bit = PTU_PTE_VALID;
3476 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3477 size_t pg_tbl_size = rmem->nr_pages * 8;
3478
3479 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3480 pg_tbl_size = rmem->page_size;
3481 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3482 &rmem->pg_tbl_map,
3483 GFP_KERNEL);
3484 if (!rmem->pg_tbl)
3485 return -ENOMEM;
3486 }
3487
3488 for (i = 0; i < rmem->nr_pages; i++) {
3489 u64 extra_bits = valid_bit;
3490
3491 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3492 rmem->page_size,
3493 &rmem->dma_arr[i],
3494 GFP_KERNEL);
3495 if (!rmem->pg_arr[i])
3496 return -ENOMEM;
3497
3498 if (rmem->ctx_mem)
3499 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3500 rmem->page_size);
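/* For multi-page or indirect rings, publish each page's DMA address
 * in the page table; ring PTEs tag the last two entries, presumably
 * so the hardware can detect the wrap point.
 */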
3501 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3502 if (i == rmem->nr_pages - 2 &&
3503 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3504 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3505 else if (i == rmem->nr_pages - 1 &&
3506 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3507 extra_bits |= PTU_PTE_LAST;
3508 rmem->pg_tbl[i] =
3509 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3510 }
3511 }
3512
3513 if (rmem->vmem_size) {
3514 *rmem->vmem = vzalloc(rmem->vmem_size);
3515 if (!(*rmem->vmem))
3516 return -ENOMEM;
3517 }
3518 return 0;
3519 }
3520
3521 static void bnxt_free_tpa_info(struct bnxt *bp)
3522 {
3523 int i, j;
3524
3525 for (i = 0; i < bp->rx_nr_rings; i++) {
3526 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3527
3528 kfree(rxr->rx_tpa_idx_map);
3529 rxr->rx_tpa_idx_map = NULL;
3530 if (rxr->rx_tpa) {
3531 for (j = 0; j < bp->max_tpa; j++) {
3532 kfree(rxr->rx_tpa[j].agg_arr);
3533 rxr->rx_tpa[j].agg_arr = NULL;
3534 }
3535 }
3536 kfree(rxr->rx_tpa);
3537 rxr->rx_tpa = NULL;
3538 }
3539 }
3540
3541 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3542 {
3543 int i, j;
3544
3545 bp->max_tpa = MAX_TPA;
3546 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3547 if (!bp->max_tpa_v2)
3548 return 0;
3549 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3550 }
3551
3552 for (i = 0; i < bp->rx_nr_rings; i++) {
3553 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3554 struct rx_agg_cmp *agg;
3555
3556 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3557 GFP_KERNEL);
3558 if (!rxr->rx_tpa)
3559 return -ENOMEM;
3560
3561 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3562 continue;
3563 for (j = 0; j < bp->max_tpa; j++) {
3564 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3565 if (!agg)
3566 return -ENOMEM;
3567 rxr->rx_tpa[j].agg_arr = agg;
3568 }
3569 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3570 GFP_KERNEL);
3571 if (!rxr->rx_tpa_idx_map)
3572 return -ENOMEM;
3573 }
3574 return 0;
3575 }
3576
3577 static void bnxt_free_rx_rings(struct bnxt *bp)
3578 {
3579 int i;
3580
3581 if (!bp->rx_ring)
3582 return;
3583
3584 bnxt_free_tpa_info(bp);
3585 for (i = 0; i < bp->rx_nr_rings; i++) {
3586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3587 struct bnxt_ring_struct *ring;
3588
3589 if (rxr->xdp_prog)
3590 bpf_prog_put(rxr->xdp_prog);
3591
3592 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3593 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3594
3595 page_pool_destroy(rxr->page_pool);
3596 rxr->page_pool = NULL;
3597
3598 kfree(rxr->rx_agg_bmap);
3599 rxr->rx_agg_bmap = NULL;
3600
3601 ring = &rxr->rx_ring_struct;
3602 bnxt_free_ring(bp, &ring->ring_mem);
3603
3604 ring = &rxr->rx_agg_ring_struct;
3605 bnxt_free_ring(bp, &ring->ring_mem);
3606 }
3607 }
3608
3609 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3610 struct bnxt_rx_ring_info *rxr,
3611 int numa_node)
3612 {
3613 struct page_pool_params pp = { 0 };
3614
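/* In page mode the data ring is also backed by the pool, so size it
 * for both the aggregation and data rings.
 */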
3615 pp.pool_size = bp->rx_agg_ring_size;
3616 if (BNXT_RX_PAGE_MODE(bp))
3617 pp.pool_size += bp->rx_ring_size;
3618 pp.nid = numa_node;
3619 pp.napi = &rxr->bnapi->napi;
3620 pp.netdev = bp->dev;
3621 pp.dev = &bp->pdev->dev;
3622 pp.dma_dir = bp->rx_dir;
3623 pp.max_len = PAGE_SIZE;
3624 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3625
3626 rxr->page_pool = page_pool_create(&pp);
3627 if (IS_ERR(rxr->page_pool)) {
3628 int err = PTR_ERR(rxr->page_pool);
3629
3630 rxr->page_pool = NULL;
3631 return err;
3632 }
3633 return 0;
3634 }
3635
3636 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3637 {
3638 int numa_node = dev_to_node(&bp->pdev->dev);
3639 int i, rc = 0, agg_rings = 0, cpu;
3640
3641 if (!bp->rx_ring)
3642 return -ENOMEM;
3643
3644 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3645 agg_rings = 1;
3646
3647 for (i = 0; i < bp->rx_nr_rings; i++) {
3648 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3649 struct bnxt_ring_struct *ring;
3650 int cpu_node;
3651
3652 ring = &rxr->rx_ring_struct;
3653
3654 cpu = cpumask_local_spread(i, numa_node);
3655 cpu_node = cpu_to_node(cpu);
3656 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3657 i, cpu_node);
3658 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3659 if (rc)
3660 return rc;
3661
3662 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3663 if (rc < 0)
3664 return rc;
3665
3666 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3667 MEM_TYPE_PAGE_POOL,
3668 rxr->page_pool);
3669 if (rc) {
3670 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3671 return rc;
3672 }
3673
3674 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3675 if (rc)
3676 return rc;
3677
3678 ring->grp_idx = i;
3679 if (agg_rings) {
3680 u16 mem_size;
3681
3682 ring = &rxr->rx_agg_ring_struct;
3683 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3684 if (rc)
3685 return rc;
3686
3687 ring->grp_idx = i;
3688 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3689 mem_size = rxr->rx_agg_bmap_size / 8;
3690 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3691 if (!rxr->rx_agg_bmap)
3692 return -ENOMEM;
3693 }
3694 }
3695 if (bp->flags & BNXT_FLAG_TPA)
3696 rc = bnxt_alloc_tpa_info(bp);
3697 return rc;
3698 }
3699
3700 static void bnxt_free_tx_rings(struct bnxt *bp)
3701 {
3702 int i;
3703 struct pci_dev *pdev = bp->pdev;
3704
3705 if (!bp->tx_ring)
3706 return;
3707
3708 for (i = 0; i < bp->tx_nr_rings; i++) {
3709 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3710 struct bnxt_ring_struct *ring;
3711
3712 if (txr->tx_push) {
3713 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3714 txr->tx_push, txr->tx_push_mapping);
3715 txr->tx_push = NULL;
3716 }
3717
3718 ring = &txr->tx_ring_struct;
3719
3720 bnxt_free_ring(bp, &ring->ring_mem);
3721 }
3722 }
3723
3724 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3725 ((tc) * (bp)->tx_nr_rings_per_tc)
3726
3727 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3728 ((tx) % (bp)->tx_nr_rings_per_tc)
3729
3730 #define BNXT_RING_TO_TC(bp, tx) \
3731 ((tx) / (bp)->tx_nr_rings_per_tc)
3732
3733 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3734 {
3735 int i, j, rc;
3736 struct pci_dev *pdev = bp->pdev;
3737
3738 bp->tx_push_size = 0;
3739 if (bp->tx_push_thresh) {
3740 int push_size;
3741
3742 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3743 bp->tx_push_thresh);
3744
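/* TX push inlines small packets into the doorbell write; disable it
 * entirely if the required push buffer would exceed 256 bytes.
 */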
3745 if (push_size > 256) {
3746 push_size = 0;
3747 bp->tx_push_thresh = 0;
3748 }
3749
3750 bp->tx_push_size = push_size;
3751 }
3752
3753 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3754 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3755 struct bnxt_ring_struct *ring;
3756 u8 qidx;
3757
3758 ring = &txr->tx_ring_struct;
3759
3760 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3761 if (rc)
3762 return rc;
3763
3764 ring->grp_idx = txr->bnapi->index;
3765 if (bp->tx_push_size) {
3766 dma_addr_t mapping;
3767
3768 /* One pre-allocated DMA buffer to back up the
3769 * TX push operation
3770 */
3771 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3772 bp->tx_push_size,
3773 &txr->tx_push_mapping,
3774 GFP_KERNEL);
3775
3776 if (!txr->tx_push)
3777 return -ENOMEM;
3778
3779 mapping = txr->tx_push_mapping +
3780 sizeof(struct tx_push_bd);
3781 txr->data_mapping = cpu_to_le64(mapping);
3782 }
3783 qidx = bp->tc_to_qidx[j];
3784 ring->queue_id = bp->q_info[qidx].queue_id;
3785 spin_lock_init(&txr->xdp_tx_lock);
3786 if (i < bp->tx_nr_rings_xdp)
3787 continue;
3788 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3789 j++;
3790 }
3791 return 0;
3792 }
3793
3794 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3795 {
3796 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3797
3798 kfree(cpr->cp_desc_ring);
3799 cpr->cp_desc_ring = NULL;
3800 ring->ring_mem.pg_arr = NULL;
3801 kfree(cpr->cp_desc_mapping);
3802 cpr->cp_desc_mapping = NULL;
3803 ring->ring_mem.dma_arr = NULL;
3804 }
3805
3806 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3807 {
3808 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3809 if (!cpr->cp_desc_ring)
3810 return -ENOMEM;
3811 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3812 GFP_KERNEL);
3813 if (!cpr->cp_desc_mapping)
3814 return -ENOMEM;
3815 return 0;
3816 }
3817
3818 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3819 {
3820 int i;
3821
3822 if (!bp->bnapi)
3823 return;
3824 for (i = 0; i < bp->cp_nr_rings; i++) {
3825 struct bnxt_napi *bnapi = bp->bnapi[i];
3826
3827 if (!bnapi)
3828 continue;
3829 bnxt_free_cp_arrays(&bnapi->cp_ring);
3830 }
3831 }
3832
3833 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3834 {
3835 int i, n = bp->cp_nr_pages;
3836
3837 for (i = 0; i < bp->cp_nr_rings; i++) {
3838 struct bnxt_napi *bnapi = bp->bnapi[i];
3839 int rc;
3840
3841 if (!bnapi)
3842 continue;
3843 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3844 if (rc)
3845 return rc;
3846 }
3847 return 0;
3848 }
3849
3850 static void bnxt_free_cp_rings(struct bnxt *bp)
3851 {
3852 int i;
3853
3854 if (!bp->bnapi)
3855 return;
3856
3857 for (i = 0; i < bp->cp_nr_rings; i++) {
3858 struct bnxt_napi *bnapi = bp->bnapi[i];
3859 struct bnxt_cp_ring_info *cpr;
3860 struct bnxt_ring_struct *ring;
3861 int j;
3862
3863 if (!bnapi)
3864 continue;
3865
3866 cpr = &bnapi->cp_ring;
3867 ring = &cpr->cp_ring_struct;
3868
3869 bnxt_free_ring(bp, &ring->ring_mem);
3870
3871 if (!cpr->cp_ring_arr)
3872 continue;
3873
3874 for (j = 0; j < cpr->cp_ring_count; j++) {
3875 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3876
3877 ring = &cpr2->cp_ring_struct;
3878 bnxt_free_ring(bp, &ring->ring_mem);
3879 bnxt_free_cp_arrays(cpr2);
3880 }
3881 kfree(cpr->cp_ring_arr);
3882 cpr->cp_ring_arr = NULL;
3883 cpr->cp_ring_count = 0;
3884 }
3885 }
3886
3887 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3888 struct bnxt_cp_ring_info *cpr)
3889 {
3890 struct bnxt_ring_mem_info *rmem;
3891 struct bnxt_ring_struct *ring;
3892 int rc;
3893
3894 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3895 if (rc) {
3896 bnxt_free_cp_arrays(cpr);
3897 return -ENOMEM;
3898 }
3899 ring = &cpr->cp_ring_struct;
3900 rmem = &ring->ring_mem;
3901 rmem->nr_pages = bp->cp_nr_pages;
3902 rmem->page_size = HW_CMPD_RING_SIZE;
3903 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3904 rmem->dma_arr = cpr->cp_desc_mapping;
3905 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3906 rc = bnxt_alloc_ring(bp, rmem);
3907 if (rc) {
3908 bnxt_free_ring(bp, rmem);
3909 bnxt_free_cp_arrays(cpr);
3910 }
3911 return rc;
3912 }
3913
3914 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3915 {
3916 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3917 int i, j, rc, ulp_msix;
3918 int tcs = bp->num_tc;
3919
3920 if (!tcs)
3921 tcs = 1;
3922 ulp_msix = bnxt_get_ulp_msix_num(bp);
3923 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3924 struct bnxt_napi *bnapi = bp->bnapi[i];
3925 struct bnxt_cp_ring_info *cpr, *cpr2;
3926 struct bnxt_ring_struct *ring;
3927 int cp_count = 0, k;
3928 int rx = 0, tx = 0;
3929
3930 if (!bnapi)
3931 continue;
3932
3933 cpr = &bnapi->cp_ring;
3934 cpr->bnapi = bnapi;
3935 ring = &cpr->cp_ring_struct;
3936
3937 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3938 if (rc)
3939 return rc;
3940
3941 ring->map_idx = ulp_msix + i;
3942
3943 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3944 continue;
3945
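/* On P5+ chips each NQ carries dedicated completion sub-rings:
 * at most one for RX plus one per traffic class for TX.
 */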
3946 if (i < bp->rx_nr_rings) {
3947 cp_count++;
3948 rx = 1;
3949 }
3950 if (i < bp->tx_nr_rings_xdp) {
3951 cp_count++;
3952 tx = 1;
3953 } else if ((sh && i < bp->tx_nr_rings) ||
3954 (!sh && i >= bp->rx_nr_rings)) {
3955 cp_count += tcs;
3956 tx = 1;
3957 }
3958
3959 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3960 GFP_KERNEL);
3961 if (!cpr->cp_ring_arr)
3962 return -ENOMEM;
3963 cpr->cp_ring_count = cp_count;
3964
3965 for (k = 0; k < cp_count; k++) {
3966 cpr2 = &cpr->cp_ring_arr[k];
3967 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3968 if (rc)
3969 return rc;
3970 cpr2->bnapi = bnapi;
3971 cpr2->sw_stats = cpr->sw_stats;
3972 cpr2->cp_idx = k;
3973 if (!k && rx) {
3974 bp->rx_ring[i].rx_cpr = cpr2;
3975 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
3976 } else {
3977 int n, tc = k - rx;
3978
3979 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3980 bp->tx_ring[n].tx_cpr = cpr2;
3981 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
3982 }
3983 }
3984 if (tx)
3985 j++;
3986 }
3987 return 0;
3988 }
3989
3990 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
3991 struct bnxt_rx_ring_info *rxr)
3992 {
3993 struct bnxt_ring_mem_info *rmem;
3994 struct bnxt_ring_struct *ring;
3995
3996 ring = &rxr->rx_ring_struct;
3997 rmem = &ring->ring_mem;
3998 rmem->nr_pages = bp->rx_nr_pages;
3999 rmem->page_size = HW_RXBD_RING_SIZE;
4000 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4001 rmem->dma_arr = rxr->rx_desc_mapping;
4002 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4003 rmem->vmem = (void **)&rxr->rx_buf_ring;
4004
4005 ring = &rxr->rx_agg_ring_struct;
4006 rmem = &ring->ring_mem;
4007 rmem->nr_pages = bp->rx_agg_nr_pages;
4008 rmem->page_size = HW_RXBD_RING_SIZE;
4009 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4010 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4011 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4012 rmem->vmem = (void **)&rxr->rx_agg_ring;
4013 }
4014
4015 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4016 struct bnxt_rx_ring_info *rxr)
4017 {
4018 struct bnxt_ring_mem_info *rmem;
4019 struct bnxt_ring_struct *ring;
4020 int i;
4021
4022 rxr->page_pool->p.napi = NULL;
4023 rxr->page_pool = NULL;
4024 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4025
4026 ring = &rxr->rx_ring_struct;
4027 rmem = &ring->ring_mem;
4028 rmem->pg_tbl = NULL;
4029 rmem->pg_tbl_map = 0;
4030 for (i = 0; i < rmem->nr_pages; i++) {
4031 rmem->pg_arr[i] = NULL;
4032 rmem->dma_arr[i] = 0;
4033 }
4034 *rmem->vmem = NULL;
4035
4036 ring = &rxr->rx_agg_ring_struct;
4037 rmem = &ring->ring_mem;
4038 rmem->pg_tbl = NULL;
4039 rmem->pg_tbl_map = 0;
4040 for (i = 0; i < rmem->nr_pages; i++) {
4041 rmem->pg_arr[i] = NULL;
4042 rmem->dma_arr[i] = 0;
4043 }
4044 *rmem->vmem = NULL;
4045 }
4046
4047 static void bnxt_init_ring_struct(struct bnxt *bp)
4048 {
4049 int i, j;
4050
4051 for (i = 0; i < bp->cp_nr_rings; i++) {
4052 struct bnxt_napi *bnapi = bp->bnapi[i];
4053 struct bnxt_ring_mem_info *rmem;
4054 struct bnxt_cp_ring_info *cpr;
4055 struct bnxt_rx_ring_info *rxr;
4056 struct bnxt_tx_ring_info *txr;
4057 struct bnxt_ring_struct *ring;
4058
4059 if (!bnapi)
4060 continue;
4061
4062 cpr = &bnapi->cp_ring;
4063 ring = &cpr->cp_ring_struct;
4064 rmem = &ring->ring_mem;
4065 rmem->nr_pages = bp->cp_nr_pages;
4066 rmem->page_size = HW_CMPD_RING_SIZE;
4067 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4068 rmem->dma_arr = cpr->cp_desc_mapping;
4069 rmem->vmem_size = 0;
4070
4071 rxr = bnapi->rx_ring;
4072 if (!rxr)
4073 goto skip_rx;
4074
4075 ring = &rxr->rx_ring_struct;
4076 rmem = &ring->ring_mem;
4077 rmem->nr_pages = bp->rx_nr_pages;
4078 rmem->page_size = HW_RXBD_RING_SIZE;
4079 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4080 rmem->dma_arr = rxr->rx_desc_mapping;
4081 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4082 rmem->vmem = (void **)&rxr->rx_buf_ring;
4083
4084 ring = &rxr->rx_agg_ring_struct;
4085 rmem = &ring->ring_mem;
4086 rmem->nr_pages = bp->rx_agg_nr_pages;
4087 rmem->page_size = HW_RXBD_RING_SIZE;
4088 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4089 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4090 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4091 rmem->vmem = (void **)&rxr->rx_agg_ring;
4092
4093 skip_rx:
4094 bnxt_for_each_napi_tx(j, bnapi, txr) {
4095 ring = &txr->tx_ring_struct;
4096 rmem = &ring->ring_mem;
4097 rmem->nr_pages = bp->tx_nr_pages;
4098 rmem->page_size = HW_TXBD_RING_SIZE;
4099 rmem->pg_arr = (void **)txr->tx_desc_ring;
4100 rmem->dma_arr = txr->tx_desc_mapping;
4101 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4102 rmem->vmem = (void **)&txr->tx_buf_ring;
4103 }
4104 }
4105 }
4106
4107 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4108 {
4109 int i;
4110 u32 prod;
4111 struct rx_bd **rx_buf_ring;
4112
4113 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4114 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4115 int j;
4116 struct rx_bd *rxbd;
4117
4118 rxbd = rx_buf_ring[i];
4119 if (!rxbd)
4120 continue;
4121
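/* rx_bd_opaque carries the producer index so the completion path
 * can locate the software buffer.
 */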
4122 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4123 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4124 rxbd->rx_bd_opaque = prod;
4125 }
4126 }
4127 }
4128
4129 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4130 struct bnxt_rx_ring_info *rxr,
4131 int ring_nr)
4132 {
4133 u32 prod;
4134 int i;
4135
4136 prod = rxr->rx_prod;
4137 for (i = 0; i < bp->rx_ring_size; i++) {
4138 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4139 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4140 ring_nr, i, bp->rx_ring_size);
4141 break;
4142 }
4143 prod = NEXT_RX(prod);
4144 }
4145 rxr->rx_prod = prod;
4146 }
4147
4148 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
4149 struct bnxt_rx_ring_info *rxr,
4150 int ring_nr)
4151 {
4152 u32 prod;
4153 int i;
4154
4155 prod = rxr->rx_agg_prod;
4156 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4157 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4158 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4159 ring_nr, i, bp->rx_agg_ring_size);
4160 break;
4161 }
4162 prod = NEXT_RX_AGG(prod);
4163 }
4164 rxr->rx_agg_prod = prod;
4165 }
4166
4167 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4168 {
4169 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4170 int i;
4171
4172 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4173
4174 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4175 return 0;
4176
4177 bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
4178
4179 if (rxr->rx_tpa) {
4180 dma_addr_t mapping;
4181 u8 *data;
4182
4183 for (i = 0; i < bp->max_tpa; i++) {
4184 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4185 if (!data)
4186 return -ENOMEM;
4187
4188 rxr->rx_tpa[i].data = data;
4189 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4190 rxr->rx_tpa[i].mapping = mapping;
4191 }
4192 }
4193 return 0;
4194 }
4195
4196 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4197 struct bnxt_rx_ring_info *rxr)
4198 {
4199 struct bnxt_ring_struct *ring;
4200 u32 type;
4201
4202 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4203 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4204
4205 if (NET_IP_ALIGN == 2)
4206 type |= RX_BD_FLAGS_SOP;
4207
4208 ring = &rxr->rx_ring_struct;
4209 bnxt_init_rxbd_pages(ring, type);
4210 ring->fw_ring_id = INVALID_HW_RING_ID;
4211 }
4212
4213 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4214 struct bnxt_rx_ring_info *rxr)
4215 {
4216 struct bnxt_ring_struct *ring;
4217 u32 type;
4218
4219 ring = &rxr->rx_agg_ring_struct;
4220 ring->fw_ring_id = INVALID_HW_RING_ID;
4221 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4222 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4223 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4224
4225 bnxt_init_rxbd_pages(ring, type);
4226 }
4227 }
4228
4229 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4230 {
4231 struct bnxt_rx_ring_info *rxr;
4232
4233 rxr = &bp->rx_ring[ring_nr];
4234 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4235
4236 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4237 &rxr->bnapi->napi);
4238
4239 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4240 bpf_prog_add(bp->xdp_prog, 1);
4241 rxr->xdp_prog = bp->xdp_prog;
4242 }
4243
4244 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4245
4246 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4247 }
4248
4249 static void bnxt_init_cp_rings(struct bnxt *bp)
4250 {
4251 int i, j;
4252
4253 for (i = 0; i < bp->cp_nr_rings; i++) {
4254 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4255 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4256
4257 ring->fw_ring_id = INVALID_HW_RING_ID;
4258 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4259 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4260 if (!cpr->cp_ring_arr)
4261 continue;
4262 for (j = 0; j < cpr->cp_ring_count; j++) {
4263 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4264
4265 ring = &cpr2->cp_ring_struct;
4266 ring->fw_ring_id = INVALID_HW_RING_ID;
4267 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4268 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4269 }
4270 }
4271 }
4272
4273 static int bnxt_init_rx_rings(struct bnxt *bp)
4274 {
4275 int i, rc = 0;
4276
4277 if (BNXT_RX_PAGE_MODE(bp)) {
4278 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4279 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4280 } else {
4281 bp->rx_offset = BNXT_RX_OFFSET;
4282 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4283 }
4284
4285 for (i = 0; i < bp->rx_nr_rings; i++) {
4286 rc = bnxt_init_one_rx_ring(bp, i);
4287 if (rc)
4288 break;
4289 }
4290
4291 return rc;
4292 }
4293
4294 static int bnxt_init_tx_rings(struct bnxt *bp)
4295 {
4296 u16 i;
4297
4298 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4299 BNXT_MIN_TX_DESC_CNT);
4300
4301 for (i = 0; i < bp->tx_nr_rings; i++) {
4302 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4303 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4304
4305 ring->fw_ring_id = INVALID_HW_RING_ID;
4306
4307 if (i >= bp->tx_nr_rings_xdp)
4308 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4309 NETDEV_QUEUE_TYPE_TX,
4310 &txr->bnapi->napi);
4311 }
4312
4313 return 0;
4314 }
4315
4316 static void bnxt_free_ring_grps(struct bnxt *bp)
4317 {
4318 kfree(bp->grp_info);
4319 bp->grp_info = NULL;
4320 }
4321
4322 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4323 {
4324 int i;
4325
4326 if (irq_re_init) {
4327 bp->grp_info = kcalloc(bp->cp_nr_rings,
4328 sizeof(struct bnxt_ring_grp_info),
4329 GFP_KERNEL);
4330 if (!bp->grp_info)
4331 return -ENOMEM;
4332 }
4333 for (i = 0; i < bp->cp_nr_rings; i++) {
4334 if (irq_re_init)
4335 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4336 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4337 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4338 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4339 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4340 }
4341 return 0;
4342 }
4343
4344 static void bnxt_free_vnics(struct bnxt *bp)
4345 {
4346 kfree(bp->vnic_info);
4347 bp->vnic_info = NULL;
4348 bp->nr_vnics = 0;
4349 }
4350
4351 static int bnxt_alloc_vnics(struct bnxt *bp)
4352 {
4353 int num_vnics = 1;
4354
4355 #ifdef CONFIG_RFS_ACCEL
4356 if (bp->flags & BNXT_FLAG_RFS) {
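/* RFS needs either one dedicated ntuple VNIC or, on pre-P5 chips,
 * one VNIC per RX ring.
 */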
4357 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4358 num_vnics++;
4359 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4360 num_vnics += bp->rx_nr_rings;
4361 }
4362 #endif
4363
4364 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4365 num_vnics++;
4366
4367 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4368 GFP_KERNEL);
4369 if (!bp->vnic_info)
4370 return -ENOMEM;
4371
4372 bp->nr_vnics = num_vnics;
4373 return 0;
4374 }
4375
4376 static void bnxt_init_vnics(struct bnxt *bp)
4377 {
4378 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4379 int i;
4380
4381 for (i = 0; i < bp->nr_vnics; i++) {
4382 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4383 int j;
4384
4385 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4386 vnic->vnic_id = i;
4387 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4388 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4389
4390 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4391
4392 if (bp->vnic_info[i].rss_hash_key) {
4393 if (i == BNXT_VNIC_DEFAULT) {
4394 u8 *key = (void *)vnic->rss_hash_key;
4395 int k;
4396
4397 if (!bp->rss_hash_key_valid &&
4398 !bp->rss_hash_key_updated) {
4399 get_random_bytes(bp->rss_hash_key,
4400 HW_HASH_KEY_SIZE);
4401 bp->rss_hash_key_updated = true;
4402 }
4403
4404 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4405 HW_HASH_KEY_SIZE);
4406
4407 if (!bp->rss_hash_key_updated)
4408 continue;
4409
4410 bp->rss_hash_key_updated = false;
4411 bp->rss_hash_key_valid = true;
4412
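/* Cache the first 8 key bytes, big-endian, for software Toeplitz
 * hash computations.
 */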
4413 bp->toeplitz_prefix = 0;
4414 for (k = 0; k < 8; k++) {
4415 bp->toeplitz_prefix <<= 8;
4416 bp->toeplitz_prefix |= key[k];
4417 }
4418 } else {
4419 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4420 HW_HASH_KEY_SIZE);
4421 }
4422 }
4423 }
4424 }
4425
4426 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4427 {
4428 int pages;
4429
4430 pages = ring_size / desc_per_pg;
4431
4432 if (!pages)
4433 return 1;
4434
4435 pages++;
4436
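/* Round up to the next power of 2; the ring masks assume a
 * power-of-2 number of pages.
 */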
4437 while (pages & (pages - 1))
4438 pages++;
4439
4440 return pages;
4441 }
4442
4443 void bnxt_set_tpa_flags(struct bnxt *bp)
4444 {
4445 bp->flags &= ~BNXT_FLAG_TPA;
4446 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4447 return;
4448 if (bp->dev->features & NETIF_F_LRO)
4449 bp->flags |= BNXT_FLAG_LRO;
4450 else if (bp->dev->features & NETIF_F_GRO_HW)
4451 bp->flags |= BNXT_FLAG_GRO;
4452 }
4453
4454 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4455 * be set on entry.
4456 */
4457 void bnxt_set_ring_params(struct bnxt *bp)
4458 {
4459 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4460 u32 agg_factor = 0, agg_ring_size = 0;
4461
4462 /* 8 for CRC and VLAN */
4463 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4464
4465 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4466 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4467
4468 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4469 ring_size = bp->rx_ring_size;
4470 bp->rx_agg_ring_size = 0;
4471 bp->rx_agg_nr_pages = 0;
4472
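/* A TPA aggregation can span up to 64K, i.e. 65536 / BNXT_RX_PAGE_SIZE
 * agg buffers per RX buffer, capped at a factor of 4.
 */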
4473 if (bp->flags & BNXT_FLAG_TPA)
4474 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4475
4476 bp->flags &= ~BNXT_FLAG_JUMBO;
4477 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4478 u32 jumbo_factor;
4479
4480 bp->flags |= BNXT_FLAG_JUMBO;
4481 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4482 if (jumbo_factor > agg_factor)
4483 agg_factor = jumbo_factor;
4484 }
4485 if (agg_factor) {
4486 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4487 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4488 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4489 bp->rx_ring_size, ring_size);
4490 bp->rx_ring_size = ring_size;
4491 }
4492 agg_ring_size = ring_size * agg_factor;
4493
4494 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4495 RX_DESC_CNT);
4496 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4497 u32 tmp = agg_ring_size;
4498
4499 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4500 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4501 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4502 tmp, agg_ring_size);
4503 }
4504 bp->rx_agg_ring_size = agg_ring_size;
4505 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4506
4507 if (BNXT_RX_PAGE_MODE(bp)) {
4508 rx_space = PAGE_SIZE;
4509 rx_size = PAGE_SIZE -
4510 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4511 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4512 } else {
4513 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
4514 rx_space = rx_size + NET_SKB_PAD +
4515 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4516 }
4517 }
4518
4519 bp->rx_buf_use_size = rx_size;
4520 bp->rx_buf_size = rx_space;
4521
4522 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4523 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4524
4525 ring_size = bp->tx_ring_size;
4526 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4527 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4528
4529 max_rx_cmpl = bp->rx_ring_size;
4530 /* MAX TPA needs to be added because TPA_START completions are
4531 * immediately recycled, so the TPA completions are not bound by
4532 * the RX ring size.
4533 */
4534 if (bp->flags & BNXT_FLAG_TPA)
4535 max_rx_cmpl += bp->max_tpa;
4536 /* RX and TPA completions are 32-byte, all others are 16-byte */
4537 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4538 bp->cp_ring_size = ring_size;
4539
4540 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4541 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4542 bp->cp_nr_pages = MAX_CP_PAGES;
4543 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4544 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4545 ring_size, bp->cp_ring_size);
4546 }
4547 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4548 bp->cp_ring_mask = bp->cp_bit - 1;
4549 }
4550
4551 /* Changing the allocation mode of the RX rings.
4552 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4553 */
4554 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4555 {
4556 struct net_device *dev = bp->dev;
4557
4558 if (page_mode) {
4559 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4560 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4561
4562 if (bp->xdp_prog->aux->xdp_has_frags)
4563 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4564 else
4565 dev->max_mtu =
4566 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4567 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4568 bp->flags |= BNXT_FLAG_JUMBO;
4569 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4570 } else {
4571 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4572 bp->rx_skb_func = bnxt_rx_page_skb;
4573 }
4574 bp->rx_dir = DMA_BIDIRECTIONAL;
4575 /* Disable LRO or GRO_HW */
4576 netdev_update_features(dev);
4577 } else {
4578 dev->max_mtu = bp->max_mtu;
4579 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4580 bp->rx_dir = DMA_FROM_DEVICE;
4581 bp->rx_skb_func = bnxt_rx_skb;
4582 }
4583 return 0;
4584 }
4585
4586 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4587 {
4588 int i;
4589 struct bnxt_vnic_info *vnic;
4590 struct pci_dev *pdev = bp->pdev;
4591
4592 if (!bp->vnic_info)
4593 return;
4594
4595 for (i = 0; i < bp->nr_vnics; i++) {
4596 vnic = &bp->vnic_info[i];
4597
4598 kfree(vnic->fw_grp_ids);
4599 vnic->fw_grp_ids = NULL;
4600
4601 kfree(vnic->uc_list);
4602 vnic->uc_list = NULL;
4603
4604 if (vnic->mc_list) {
4605 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4606 vnic->mc_list, vnic->mc_list_mapping);
4607 vnic->mc_list = NULL;
4608 }
4609
4610 if (vnic->rss_table) {
4611 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4612 vnic->rss_table,
4613 vnic->rss_table_dma_addr);
4614 vnic->rss_table = NULL;
4615 }
4616
4617 vnic->rss_hash_key = NULL;
4618 vnic->flags = 0;
4619 }
4620 }
4621
4622 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4623 {
4624 int i, rc = 0, size;
4625 struct bnxt_vnic_info *vnic;
4626 struct pci_dev *pdev = bp->pdev;
4627 int max_rings;
4628
4629 for (i = 0; i < bp->nr_vnics; i++) {
4630 vnic = &bp->vnic_info[i];
4631
4632 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4633 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4634
4635 if (mem_size > 0) {
4636 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4637 if (!vnic->uc_list) {
4638 rc = -ENOMEM;
4639 goto out;
4640 }
4641 }
4642 }
4643
4644 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4645 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4646 vnic->mc_list =
4647 dma_alloc_coherent(&pdev->dev,
4648 vnic->mc_list_size,
4649 &vnic->mc_list_mapping,
4650 GFP_KERNEL);
4651 if (!vnic->mc_list) {
4652 rc = -ENOMEM;
4653 goto out;
4654 }
4655 }
4656
4657 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4658 goto vnic_skip_grps;
4659
4660 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4661 max_rings = bp->rx_nr_rings;
4662 else
4663 max_rings = 1;
4664
4665 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4666 if (!vnic->fw_grp_ids) {
4667 rc = -ENOMEM;
4668 goto out;
4669 }
4670 vnic_skip_grps:
4671 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4672 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4673 continue;
4674
4675 /* Allocate rss table and hash key */
4676 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4677 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4678 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4679
4680 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4681 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4682 vnic->rss_table_size,
4683 &vnic->rss_table_dma_addr,
4684 GFP_KERNEL);
4685 if (!vnic->rss_table) {
4686 rc = -ENOMEM;
4687 goto out;
4688 }
4689
4690 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4691 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4692 }
4693 return 0;
4694
4695 out:
4696 return rc;
4697 }
4698
4699 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4700 {
4701 struct bnxt_hwrm_wait_token *token;
4702
4703 dma_pool_destroy(bp->hwrm_dma_pool);
4704 bp->hwrm_dma_pool = NULL;
4705
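/* Mark any requests still waiting on firmware as cancelled so their
 * waiters can bail out.
 */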
4706 rcu_read_lock();
4707 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4708 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4709 rcu_read_unlock();
4710 }
4711
4712 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4713 {
4714 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4715 BNXT_HWRM_DMA_SIZE,
4716 BNXT_HWRM_DMA_ALIGN, 0);
4717 if (!bp->hwrm_dma_pool)
4718 return -ENOMEM;
4719
4720 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4721
4722 return 0;
4723 }
4724
4725 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4726 {
4727 kfree(stats->hw_masks);
4728 stats->hw_masks = NULL;
4729 kfree(stats->sw_stats);
4730 stats->sw_stats = NULL;
4731 if (stats->hw_stats) {
4732 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4733 stats->hw_stats_map);
4734 stats->hw_stats = NULL;
4735 }
4736 }
4737
4738 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4739 bool alloc_masks)
4740 {
4741 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4742 &stats->hw_stats_map, GFP_KERNEL);
4743 if (!stats->hw_stats)
4744 return -ENOMEM;
4745
4746 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4747 if (!stats->sw_stats)
4748 goto stats_mem_err;
4749
4750 if (alloc_masks) {
4751 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4752 if (!stats->hw_masks)
4753 goto stats_mem_err;
4754 }
4755 return 0;
4756
4757 stats_mem_err:
4758 bnxt_free_stats_mem(bp, stats);
4759 return -ENOMEM;
4760 }
4761
4762 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4763 {
4764 int i;
4765
4766 for (i = 0; i < count; i++)
4767 mask_arr[i] = mask;
4768 }
4769
4770 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4771 {
4772 int i;
4773
4774 for (i = 0; i < count; i++)
4775 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4776 }
4777
4778 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4779 struct bnxt_stats_mem *stats)
4780 {
4781 struct hwrm_func_qstats_ext_output *resp;
4782 struct hwrm_func_qstats_ext_input *req;
4783 __le64 *hw_masks;
4784 int rc;
4785
4786 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4787 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4788 return -EOPNOTSUPP;
4789
4790 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4791 if (rc)
4792 return rc;
4793
4794 req->fid = cpu_to_le16(0xffff);
4795 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4796
4797 resp = hwrm_req_hold(bp, req);
4798 rc = hwrm_req_send(bp, req);
4799 if (!rc) {
4800 hw_masks = &resp->rx_ucast_pkts;
4801 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4802 }
4803 hwrm_req_drop(bp, req);
4804 return rc;
4805 }
4806
4807 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4808 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4809
4810 static void bnxt_init_stats(struct bnxt *bp)
4811 {
4812 struct bnxt_napi *bnapi = bp->bnapi[0];
4813 struct bnxt_cp_ring_info *cpr;
4814 struct bnxt_stats_mem *stats;
4815 __le64 *rx_stats, *tx_stats;
4816 int rc, rx_count, tx_count;
4817 u64 *rx_masks, *tx_masks;
4818 u64 mask;
4819 u8 flags;
4820
4821 cpr = &bnapi->cp_ring;
4822 stats = &cpr->stats;
4823 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4824 if (rc) {
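/* No masks from firmware; assume 48-bit counters on P5+ and
 * full 64-bit counters otherwise.
 */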
4825 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4826 mask = (1ULL << 48) - 1;
4827 else
4828 mask = -1ULL;
4829 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4830 }
4831 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4832 stats = &bp->port_stats;
4833 rx_stats = stats->hw_stats;
4834 rx_masks = stats->hw_masks;
4835 rx_count = sizeof(struct rx_port_stats) / 8;
4836 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4837 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4838 tx_count = sizeof(struct tx_port_stats) / 8;
4839
4840 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4841 rc = bnxt_hwrm_port_qstats(bp, flags);
4842 if (rc) {
4843 mask = (1ULL << 40) - 1;
4844
4845 bnxt_fill_masks(rx_masks, mask, rx_count);
4846 bnxt_fill_masks(tx_masks, mask, tx_count);
4847 } else {
4848 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4849 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4850 bnxt_hwrm_port_qstats(bp, 0);
4851 }
4852 }
4853 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4854 stats = &bp->rx_port_stats_ext;
4855 rx_stats = stats->hw_stats;
4856 rx_masks = stats->hw_masks;
4857 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4858 stats = &bp->tx_port_stats_ext;
4859 tx_stats = stats->hw_stats;
4860 tx_masks = stats->hw_masks;
4861 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4862
4863 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4864 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4865 if (rc) {
4866 mask = (1ULL << 40) - 1;
4867
4868 bnxt_fill_masks(rx_masks, mask, rx_count);
4869 if (tx_stats)
4870 bnxt_fill_masks(tx_masks, mask, tx_count);
4871 } else {
4872 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4873 if (tx_stats)
4874 bnxt_copy_hw_masks(tx_masks, tx_stats,
4875 tx_count);
4876 bnxt_hwrm_port_qstats_ext(bp, 0);
4877 }
4878 }
4879 }
4880
4881 static void bnxt_free_port_stats(struct bnxt *bp)
4882 {
4883 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4884 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4885
4886 bnxt_free_stats_mem(bp, &bp->port_stats);
4887 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4888 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4889 }
4890
4891 static void bnxt_free_ring_stats(struct bnxt *bp)
4892 {
4893 int i;
4894
4895 if (!bp->bnapi)
4896 return;
4897
4898 for (i = 0; i < bp->cp_nr_rings; i++) {
4899 struct bnxt_napi *bnapi = bp->bnapi[i];
4900 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4901
4902 bnxt_free_stats_mem(bp, &cpr->stats);
4903
4904 kfree(cpr->sw_stats);
4905 cpr->sw_stats = NULL;
4906 }
4907 }
4908
4909 static int bnxt_alloc_stats(struct bnxt *bp)
4910 {
4911 u32 size, i;
4912 int rc;
4913
4914 size = bp->hw_ring_stats_size;
4915
4916 for (i = 0; i < bp->cp_nr_rings; i++) {
4917 struct bnxt_napi *bnapi = bp->bnapi[i];
4918 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4919
4920 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
4921 if (!cpr->sw_stats)
4922 return -ENOMEM;
4923
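/* Counter rollover masks are common to all rings, so only ring 0
 * allocates hw_masks.
 */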
4924 cpr->stats.len = size;
4925 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4926 if (rc)
4927 return rc;
4928
4929 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4930 }
4931
4932 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4933 return 0;
4934
4935 if (bp->port_stats.hw_stats)
4936 goto alloc_ext_stats;
4937
4938 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4939 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4940 if (rc)
4941 return rc;
4942
4943 bp->flags |= BNXT_FLAG_PORT_STATS;
4944
4945 alloc_ext_stats:
4946 /* Display extended statistics only if FW supports it */
4947 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4948 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4949 return 0;
4950
4951 if (bp->rx_port_stats_ext.hw_stats)
4952 goto alloc_tx_ext_stats;
4953
4954 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4955 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4956 /* Extended stats are optional */
4957 if (rc)
4958 return 0;
4959
4960 alloc_tx_ext_stats:
4961 if (bp->tx_port_stats_ext.hw_stats)
4962 return 0;
4963
4964 if (bp->hwrm_spec_code >= 0x10902 ||
4965 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4966 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4967 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4968 /* Extended stats are optional */
4969 if (rc)
4970 return 0;
4971 }
4972 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4973 return 0;
4974 }
4975
4976 static void bnxt_clear_ring_indices(struct bnxt *bp)
4977 {
4978 int i, j;
4979
4980 if (!bp->bnapi)
4981 return;
4982
4983 for (i = 0; i < bp->cp_nr_rings; i++) {
4984 struct bnxt_napi *bnapi = bp->bnapi[i];
4985 struct bnxt_cp_ring_info *cpr;
4986 struct bnxt_rx_ring_info *rxr;
4987 struct bnxt_tx_ring_info *txr;
4988
4989 if (!bnapi)
4990 continue;
4991
4992 cpr = &bnapi->cp_ring;
4993 cpr->cp_raw_cons = 0;
4994
4995 bnxt_for_each_napi_tx(j, bnapi, txr) {
4996 txr->tx_prod = 0;
4997 txr->tx_cons = 0;
4998 txr->tx_hw_cons = 0;
4999 }
5000
5001 rxr = bnapi->rx_ring;
5002 if (rxr) {
5003 rxr->rx_prod = 0;
5004 rxr->rx_agg_prod = 0;
5005 rxr->rx_sw_agg_prod = 0;
5006 rxr->rx_next_cons = 0;
5007 }
5008 bnapi->events = 0;
5009 }
5010 }
5011
5012 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5013 {
5014 u8 type = fltr->type, flags = fltr->flags;
5015
5016 INIT_LIST_HEAD(&fltr->list);
5017 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5018 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5019 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5020 }
5021
5022 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5023 {
5024 if (!list_empty(&fltr->list))
5025 list_del_init(&fltr->list);
5026 }
5027
5028 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5029 {
5030 struct bnxt_filter_base *usr_fltr, *tmp;
5031
5032 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5033 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5034 continue;
5035 bnxt_del_one_usr_fltr(bp, usr_fltr);
5036 }
5037 }
5038
5039 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5040 {
5041 hlist_del(&fltr->hash);
5042 bnxt_del_one_usr_fltr(bp, fltr);
5043 if (fltr->flags) {
5044 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5045 bp->ntp_fltr_count--;
5046 }
5047 kfree(fltr);
5048 }
5049
5050 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5051 {
5052 int i;
5053
5054 /* Called under rtnl_lock with all our NAPIs disabled, so it's
5055 * safe to delete the hash table.
5056 */
5057 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5058 struct hlist_head *head;
5059 struct hlist_node *tmp;
5060 struct bnxt_ntuple_filter *fltr;
5061
5062 head = &bp->ntp_fltr_hash_tbl[i];
5063 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5064 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5065 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5066 !list_empty(&fltr->base.list)))
5067 continue;
5068 bnxt_del_fltr(bp, &fltr->base);
5069 }
5070 }
5071 if (!all)
5072 return;
5073
5074 bitmap_free(bp->ntp_fltr_bmap);
5075 bp->ntp_fltr_bmap = NULL;
5076 bp->ntp_fltr_count = 0;
5077 }
5078
5079 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5080 {
5081 int i, rc = 0;
5082
5083 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5084 return 0;
5085
5086 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5087 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5088
5089 bp->ntp_fltr_count = 0;
5090 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5091
5092 if (!bp->ntp_fltr_bmap)
5093 rc = -ENOMEM;
5094
5095 return rc;
5096 }
5097
5098 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5099 {
5100 int i;
5101
5102 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5103 struct hlist_head *head;
5104 struct hlist_node *tmp;
5105 struct bnxt_l2_filter *fltr;
5106
5107 head = &bp->l2_fltr_hash_tbl[i];
5108 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5109 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5110 !list_empty(&fltr->base.list)))
5111 continue;
5112 bnxt_del_fltr(bp, &fltr->base);
5113 }
5114 }
5115 }
5116
5117 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5118 {
5119 int i;
5120
5121 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5122 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5123 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5124 }
5125
5126 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5127 {
5128 bnxt_free_vnic_attributes(bp);
5129 bnxt_free_tx_rings(bp);
5130 bnxt_free_rx_rings(bp);
5131 bnxt_free_cp_rings(bp);
5132 bnxt_free_all_cp_arrays(bp);
5133 bnxt_free_ntp_fltrs(bp, false);
5134 bnxt_free_l2_filters(bp, false);
5135 if (irq_re_init) {
5136 bnxt_free_ring_stats(bp);
5137 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5138 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5139 bnxt_free_port_stats(bp);
5140 bnxt_free_ring_grps(bp);
5141 bnxt_free_vnics(bp);
5142 kfree(bp->tx_ring_map);
5143 bp->tx_ring_map = NULL;
5144 kfree(bp->tx_ring);
5145 bp->tx_ring = NULL;
5146 kfree(bp->rx_ring);
5147 bp->rx_ring = NULL;
5148 kfree(bp->bnapi);
5149 bp->bnapi = NULL;
5150 } else {
5151 bnxt_clear_ring_indices(bp);
5152 }
5153 }
5154
5155 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5156 {
5157 int i, j, rc, size, arr_size;
5158 void *bnapi;
5159
5160 if (irq_re_init) {
5161 /* Allocate bnapi mem pointer array and mem block for
5162 * all queues
5163 */
5164 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5165 bp->cp_nr_rings);
5166 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5167 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5168 if (!bnapi)
5169 return -ENOMEM;
5170
5171 bp->bnapi = bnapi;
5172 bnapi += arr_size;
5173 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5174 bp->bnapi[i] = bnapi;
5175 bp->bnapi[i]->index = i;
5176 bp->bnapi[i]->bp = bp;
5177 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5178 struct bnxt_cp_ring_info *cpr =
5179 &bp->bnapi[i]->cp_ring;
5180
5181 cpr->cp_ring_struct.ring_mem.flags =
5182 BNXT_RMEM_RING_PTE_FLAG;
5183 }
5184 }
5185
5186 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5187 sizeof(struct bnxt_rx_ring_info),
5188 GFP_KERNEL);
5189 if (!bp->rx_ring)
5190 return -ENOMEM;
5191
5192 for (i = 0; i < bp->rx_nr_rings; i++) {
5193 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5194
5195 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5196 rxr->rx_ring_struct.ring_mem.flags =
5197 BNXT_RMEM_RING_PTE_FLAG;
5198 rxr->rx_agg_ring_struct.ring_mem.flags =
5199 BNXT_RMEM_RING_PTE_FLAG;
5200 } else {
5201 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5202 }
5203 rxr->bnapi = bp->bnapi[i];
5204 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5205 }
5206
5207 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5208 sizeof(struct bnxt_tx_ring_info),
5209 GFP_KERNEL);
5210 if (!bp->tx_ring)
5211 return -ENOMEM;
5212
5213 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5214 GFP_KERNEL);
5215
5216 if (!bp->tx_ring_map)
5217 return -ENOMEM;
5218
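/* With shared rings, TX shares NAPIs with RX starting at index 0;
 * otherwise the TX NAPIs follow after the RX ones.
 */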
5219 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5220 j = 0;
5221 else
5222 j = bp->rx_nr_rings;
5223
5224 for (i = 0; i < bp->tx_nr_rings; i++) {
5225 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5226 struct bnxt_napi *bnapi2;
5227
5228 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5229 txr->tx_ring_struct.ring_mem.flags =
5230 BNXT_RMEM_RING_PTE_FLAG;
5231 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5232 if (i >= bp->tx_nr_rings_xdp) {
5233 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5234
5235 bnapi2 = bp->bnapi[k];
5236 txr->txq_index = i - bp->tx_nr_rings_xdp;
5237 txr->tx_napi_idx =
5238 BNXT_RING_TO_TC(bp, txr->txq_index);
5239 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5240 bnapi2->tx_int = bnxt_tx_int;
5241 } else {
5242 bnapi2 = bp->bnapi[j];
5243 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5244 bnapi2->tx_ring[0] = txr;
5245 bnapi2->tx_int = bnxt_tx_int_xdp;
5246 j++;
5247 }
5248 txr->bnapi = bnapi2;
5249 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5250 txr->tx_cpr = &bnapi2->cp_ring;
5251 }
5252
5253 rc = bnxt_alloc_stats(bp);
5254 if (rc)
5255 goto alloc_mem_err;
5256 bnxt_init_stats(bp);
5257
5258 rc = bnxt_alloc_ntp_fltrs(bp);
5259 if (rc)
5260 goto alloc_mem_err;
5261
5262 rc = bnxt_alloc_vnics(bp);
5263 if (rc)
5264 goto alloc_mem_err;
5265 }
5266
5267 rc = bnxt_alloc_all_cp_arrays(bp);
5268 if (rc)
5269 goto alloc_mem_err;
5270
5271 bnxt_init_ring_struct(bp);
5272
5273 rc = bnxt_alloc_rx_rings(bp);
5274 if (rc)
5275 goto alloc_mem_err;
5276
5277 rc = bnxt_alloc_tx_rings(bp);
5278 if (rc)
5279 goto alloc_mem_err;
5280
5281 rc = bnxt_alloc_cp_rings(bp);
5282 if (rc)
5283 goto alloc_mem_err;
5284
5285 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5286 BNXT_VNIC_MCAST_FLAG |
5287 BNXT_VNIC_UCAST_FLAG;
5288 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5289 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5290 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5291
5292 rc = bnxt_alloc_vnic_attributes(bp);
5293 if (rc)
5294 goto alloc_mem_err;
5295 return 0;
5296
5297 alloc_mem_err:
5298 bnxt_free_mem(bp, true);
5299 return rc;
5300 }
5301
5302 static void bnxt_disable_int(struct bnxt *bp)
5303 {
5304 int i;
5305
5306 if (!bp->bnapi)
5307 return;
5308
5309 for (i = 0; i < bp->cp_nr_rings; i++) {
5310 struct bnxt_napi *bnapi = bp->bnapi[i];
5311 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5312 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5313
5314 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5315 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5316 }
5317 }
5318
5319 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5320 {
5321 struct bnxt_napi *bnapi = bp->bnapi[n];
5322 struct bnxt_cp_ring_info *cpr;
5323
5324 cpr = &bnapi->cp_ring;
5325 return cpr->cp_ring_struct.map_idx;
5326 }
5327
5328 static void bnxt_disable_int_sync(struct bnxt *bp)
5329 {
5330 int i;
5331
5332 if (!bp->irq_tbl)
5333 return;
5334
5335 atomic_inc(&bp->intr_sem);
5336
5337 bnxt_disable_int(bp);
5338 for (i = 0; i < bp->cp_nr_rings; i++) {
5339 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5340
5341 synchronize_irq(bp->irq_tbl[map_idx].vector);
5342 }
5343 }
5344
5345 static void bnxt_enable_int(struct bnxt *bp)
5346 {
5347 int i;
5348
5349 atomic_set(&bp->intr_sem, 0);
5350 for (i = 0; i < bp->cp_nr_rings; i++) {
5351 struct bnxt_napi *bnapi = bp->bnapi[i];
5352 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5353
5354 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5355 }
5356 }
5357
5358 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5359 bool async_only)
5360 {
5361 DECLARE_BITMAP(async_events_bmap, 256);
5362 u32 *events = (u32 *)async_events_bmap;
5363 struct hwrm_func_drv_rgtr_output *resp;
5364 struct hwrm_func_drv_rgtr_input *req;
5365 u32 flags;
5366 int rc, i;
5367
5368 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5369 if (rc)
5370 return rc;
5371
5372 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5373 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5374 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5375
5376 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5377 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5378 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5379 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5380 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5381 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5382 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5383 req->flags = cpu_to_le32(flags);
5384 req->ver_maj_8b = DRV_VER_MAJ;
5385 req->ver_min_8b = DRV_VER_MIN;
5386 req->ver_upd_8b = DRV_VER_UPD;
5387 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5388 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5389 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5390
5391 if (BNXT_PF(bp)) {
5392 u32 data[8];
5393 int i;
5394
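/* Encode bnxt_vf_req_snif[] as a 256-bit bitmap (8 x u32): command
 * ID N maps to bit (N % 32) of word (N / 32).
 */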
5395 memset(data, 0, sizeof(data));
5396 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5397 u16 cmd = bnxt_vf_req_snif[i];
5398 unsigned int bit, idx;
5399
5400 idx = cmd / 32;
5401 bit = cmd % 32;
5402 data[idx] |= 1 << bit;
5403 }
5404
5405 for (i = 0; i < 8; i++)
5406 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5407
5408 req->enables |=
5409 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5410 }
5411
5412 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5413 req->flags |= cpu_to_le32(
5414 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5415
5416 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5417 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5418 u16 event_id = bnxt_async_events_arr[i];
5419
5420 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5421 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5422 continue;
5423 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5424 !bp->ptp_cfg)
5425 continue;
5426 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5427 }
5428 if (bmap && bmap_size) {
5429 for (i = 0; i < bmap_size; i++) {
5430 if (test_bit(i, bmap))
5431 __set_bit(i, async_events_bmap);
5432 }
5433 }
5434 for (i = 0; i < 8; i++)
5435 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5436
5437 if (async_only)
5438 req->enables =
5439 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5440
5441 resp = hwrm_req_hold(bp, req);
5442 rc = hwrm_req_send(bp, req);
5443 if (!rc) {
5444 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5445 if (resp->flags &
5446 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5447 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5448 }
5449 hwrm_req_drop(bp, req);
5450 return rc;
5451 }
5452
5453 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5454 {
5455 struct hwrm_func_drv_unrgtr_input *req;
5456 int rc;
5457
5458 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5459 return 0;
5460
5461 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5462 if (rc)
5463 return rc;
5464 return hwrm_req_send(bp, req);
5465 }
5466
5467 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5468
5469 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5470 {
5471 struct hwrm_tunnel_dst_port_free_input *req;
5472 int rc;
5473
5474 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5475 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5476 return 0;
5477 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5478 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5479 return 0;
5480
5481 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5482 if (rc)
5483 return rc;
5484
5485 req->tunnel_type = tunnel_type;
5486
5487 switch (tunnel_type) {
5488 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5489 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5490 bp->vxlan_port = 0;
5491 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5492 break;
5493 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5494 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5495 bp->nge_port = 0;
5496 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5497 break;
5498 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5499 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5500 bp->vxlan_gpe_port = 0;
5501 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5502 break;
5503 default:
5504 break;
5505 }
5506
5507 rc = hwrm_req_send(bp, req);
5508 if (rc)
5509 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5510 rc);
5511 if (bp->flags & BNXT_FLAG_TPA)
5512 bnxt_set_tpa(bp, true);
5513 return rc;
5514 }
5515
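/* Tell firmware to start parsing the given UDP destination port as a
 * VXLAN, GENEVE or VXLAN-GPE tunnel, and record the returned firmware
 * port ID so the port can be freed later. TPA is reconfigured since
 * the tunnel bitmap may have changed.
 */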
5516 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5517 u8 tunnel_type)
5518 {
5519 struct hwrm_tunnel_dst_port_alloc_output *resp;
5520 struct hwrm_tunnel_dst_port_alloc_input *req;
5521 int rc;
5522
5523 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5524 if (rc)
5525 return rc;
5526
5527 req->tunnel_type = tunnel_type;
5528 req->tunnel_dst_port_val = port;
5529
5530 resp = hwrm_req_hold(bp, req);
5531 rc = hwrm_req_send(bp, req);
5532 if (rc) {
5533 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5534 rc);
5535 goto err_out;
5536 }
5537
5538 switch (tunnel_type) {
5539 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5540 bp->vxlan_port = port;
5541 bp->vxlan_fw_dst_port_id =
5542 le16_to_cpu(resp->tunnel_dst_port_id);
5543 break;
5544 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5545 bp->nge_port = port;
5546 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5547 break;
5548 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5549 bp->vxlan_gpe_port = port;
5550 bp->vxlan_gpe_fw_dst_port_id =
5551 le16_to_cpu(resp->tunnel_dst_port_id);
5552 break;
5553 default:
5554 break;
5555 }
5556 if (bp->flags & BNXT_FLAG_TPA)
5557 bnxt_set_tpa(bp, true);
5558
5559 err_out:
5560 hwrm_req_drop(bp, req);
5561 return rc;
5562 }
5563
5564 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5565 {
5566 struct hwrm_cfa_l2_set_rx_mask_input *req;
5567 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5568 int rc;
5569
5570 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5571 if (rc)
5572 return rc;
5573
5574 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5575 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5576 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5577 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5578 }
5579 req->mask = cpu_to_le32(vnic->rx_mask);
5580 return hwrm_req_send_silent(bp, req);
5581 }
5582
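/* Drop one reference on an L2 filter. On the last reference, unhash
 * it under ntp_fltr_lock and free it after an RCU grace period.
 */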
5583 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5584 {
5585 if (!atomic_dec_and_test(&fltr->refcnt))
5586 return;
5587 spin_lock_bh(&bp->ntp_fltr_lock);
5588 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5589 spin_unlock_bh(&bp->ntp_fltr_lock);
5590 return;
5591 }
5592 hlist_del_rcu(&fltr->base.hash);
5593 bnxt_del_one_usr_fltr(bp, &fltr->base);
5594 if (fltr->base.flags) {
5595 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5596 bp->ntp_fltr_count--;
5597 }
5598 spin_unlock_bh(&bp->ntp_fltr_lock);
5599 kfree_rcu(fltr, base.rcu);
5600 }
5601
5602 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5603 struct bnxt_l2_key *key,
5604 u32 idx)
5605 {
5606 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5607 struct bnxt_l2_filter *fltr;
5608
5609 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5610 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5611
5612 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5613 l2_key->vlan == key->vlan)
5614 return fltr;
5615 }
5616 return NULL;
5617 }
5618
5619 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5620 struct bnxt_l2_key *key,
5621 u32 idx)
5622 {
5623 struct bnxt_l2_filter *fltr = NULL;
5624
5625 rcu_read_lock();
5626 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5627 if (fltr)
5628 atomic_inc(&fltr->refcnt);
5629 rcu_read_unlock();
5630 return fltr;
5631 }
5632
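/* True if RSS is configured to hash this flow over the full 4-tuple
 * (source/destination address plus source/destination port) for its
 * L4 protocol.
 */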
5633 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5634 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5635 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5636 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5637 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5638
5639 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5640 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5641 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5642 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5643 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5644
5645 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5646 {
5647 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5648 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5649 return sizeof(fkeys->addrs.v4addrs) +
5650 sizeof(fkeys->ports);
5651
5652 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5653 return sizeof(fkeys->addrs.v4addrs);
5654 }
5655
5656 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5657 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5658 return sizeof(fkeys->addrs.v6addrs) +
5659 sizeof(fkeys->ports);
5660
5661 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5662 return sizeof(fkeys->addrs.v6addrs);
5663 }
5664
5665 return 0;
5666 }
5667
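/* Software Toeplitz hash over the flow tuple. 'prefix' is a 64-bit
 * sliding window over the RSS hash key, pre-seeded with its first 8
 * bytes; every set bit of the tuple XORs the current window into the
 * accumulator, and the window shifts left one bit per input bit,
 * pulling in the next key byte after every 8 bits.
 */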
5668 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5669 const unsigned char *key)
5670 {
5671 u64 prefix = bp->toeplitz_prefix, hash = 0;
5672 struct bnxt_ipv4_tuple tuple4;
5673 struct bnxt_ipv6_tuple tuple6;
5674 int i, j, len = 0;
5675 u8 *four_tuple;
5676
5677 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5678 if (!len)
5679 return 0;
5680
5681 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5682 tuple4.v4addrs = fkeys->addrs.v4addrs;
5683 tuple4.ports = fkeys->ports;
5684 four_tuple = (unsigned char *)&tuple4;
5685 } else {
5686 tuple6.v6addrs = fkeys->addrs.v6addrs;
5687 tuple6.ports = fkeys->ports;
5688 four_tuple = (unsigned char *)&tuple6;
5689 }
5690
5691 for (i = 0, j = 8; i < len; i++, j++) {
5692 u8 byte = four_tuple[i];
5693 int bit;
5694
5695 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5696 if (byte & 0x80)
5697 hash ^= prefix;
5698 }
5699 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5700 }
5701
5702 /* The valid part of the hash is in the upper 32 bits. */
5703 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5704 }
5705
5706 #ifdef CONFIG_RFS_ACCEL
5707 static struct bnxt_l2_filter *
5708 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5709 {
5710 struct bnxt_l2_filter *fltr;
5711 u32 idx;
5712
5713 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5714 BNXT_L2_FLTR_HASH_MASK;
5715 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5716 return fltr;
5717 }
5718 #endif
5719
5720 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5721 struct bnxt_l2_key *key, u32 idx)
5722 {
5723 struct hlist_head *head;
5724
5725 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5726 fltr->l2_key.vlan = key->vlan;
5727 fltr->base.type = BNXT_FLTR_TYPE_L2;
5728 if (fltr->base.flags) {
5729 int bit_id;
5730
5731 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5732 bp->max_fltr, 0);
5733 if (bit_id < 0)
5734 return -ENOMEM;
5735 fltr->base.sw_id = (u16)bit_id;
5736 bp->ntp_fltr_count++;
5737 }
5738 head = &bp->l2_fltr_hash_tbl[idx];
5739 hlist_add_head_rcu(&fltr->base.hash, head);
5740 bnxt_insert_usr_fltr(bp, &fltr->base);
5741 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5742 atomic_set(&fltr->refcnt, 1);
5743 return 0;
5744 }
5745
5746 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5747 struct bnxt_l2_key *key,
5748 gfp_t gfp)
5749 {
5750 struct bnxt_l2_filter *fltr;
5751 u32 idx;
5752 int rc;
5753
5754 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5755 BNXT_L2_FLTR_HASH_MASK;
5756 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5757 if (fltr)
5758 return fltr;
5759
5760 fltr = kzalloc(sizeof(*fltr), gfp);
5761 if (!fltr)
5762 return ERR_PTR(-ENOMEM);
5763 spin_lock_bh(&bp->ntp_fltr_lock);
5764 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5765 spin_unlock_bh(&bp->ntp_fltr_lock);
5766 if (rc) {
5767 bnxt_del_l2_filter(bp, fltr);
5768 fltr = ERR_PTR(rc);
5769 }
5770 return fltr;
5771 }
5772
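/* Variant of bnxt_alloc_l2_filter() that treats an existing filter
 * with the same key as an error (-EEXIST) instead of a reusable
 * match, and allocates atomically under ntp_fltr_lock.
 */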
5773 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5774 struct bnxt_l2_key *key,
5775 u16 flags)
5776 {
5777 struct bnxt_l2_filter *fltr;
5778 u32 idx;
5779 int rc;
5780
5781 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5782 BNXT_L2_FLTR_HASH_MASK;
5783 spin_lock_bh(&bp->ntp_fltr_lock);
5784 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5785 if (fltr) {
5786 fltr = ERR_PTR(-EEXIST);
5787 goto l2_filter_exit;
5788 }
5789 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5790 if (!fltr) {
5791 fltr = ERR_PTR(-ENOMEM);
5792 goto l2_filter_exit;
5793 }
5794 fltr->base.flags = flags;
5795 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5796 if (rc) {
5797 spin_unlock_bh(&bp->ntp_fltr_lock);
5798 bnxt_del_l2_filter(bp, fltr);
5799 return ERR_PTR(rc);
5800 }
5801
5802 l2_filter_exit:
5803 spin_unlock_bh(&bp->ntp_fltr_lock);
5804 return fltr;
5805 }
5806
5807 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
5808 {
5809 #ifdef CONFIG_BNXT_SRIOV
5810 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
5811
5812 return vf->fw_fid;
5813 #else
5814 return INVALID_HW_RING_ID;
5815 #endif
5816 }
5817
5818 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5819 {
5820 struct hwrm_cfa_l2_filter_free_input *req;
5821 u16 target_id = 0xffff;
5822 int rc;
5823
5824 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5825 struct bnxt_pf_info *pf = &bp->pf;
5826
5827 if (fltr->base.vf_idx >= pf->active_vfs)
5828 return -EINVAL;
5829
5830 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5831 if (target_id == INVALID_HW_RING_ID)
5832 return -EINVAL;
5833 }
5834
5835 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5836 if (rc)
5837 return rc;
5838
5839 req->target_id = cpu_to_le16(target_id);
5840 req->l2_filter_id = fltr->base.filter_id;
5841 return hwrm_req_send(bp, req);
5842 }
5843
5844 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5845 {
5846 struct hwrm_cfa_l2_filter_alloc_output *resp;
5847 struct hwrm_cfa_l2_filter_alloc_input *req;
5848 u16 target_id = 0xffff;
5849 int rc;
5850
5851 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5852 struct bnxt_pf_info *pf = &bp->pf;
5853
5854 if (fltr->base.vf_idx >= pf->active_vfs)
5855 return -EINVAL;
5856
5857 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5858 }
5859 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5860 if (rc)
5861 return rc;
5862
5863 req->target_id = cpu_to_le16(target_id);
5864 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5865
5866 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5867 req->flags |=
5868 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5869 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
5870 req->enables =
5871 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5872 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5873 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5874 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
5875 eth_broadcast_addr(req->l2_addr_mask);
5876
5877 if (fltr->l2_key.vlan) {
5878 req->enables |=
5879 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5880 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5881 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
5882 req->num_vlans = 1;
5883 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
5884 req->l2_ivlan_mask = cpu_to_le16(0xfff);
5885 }
5886
5887 resp = hwrm_req_hold(bp, req);
5888 rc = hwrm_req_send(bp, req);
5889 if (!rc) {
5890 fltr->base.filter_id = resp->l2_filter_id;
5891 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5892 }
5893 hwrm_req_drop(bp, req);
5894 return rc;
5895 }
5896
5897 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5898 struct bnxt_ntuple_filter *fltr)
5899 {
5900 struct hwrm_cfa_ntuple_filter_free_input *req;
5901 int rc;
5902
5903 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5904 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5905 if (rc)
5906 return rc;
5907
5908 req->ntuple_filter_id = fltr->base.filter_id;
5909 return hwrm_req_send(bp, req);
5910 }
5911
5912 #define BNXT_NTP_FLTR_FLAGS \
5913 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
5914 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
5915 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
5916 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
5917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
5918 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
5919 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
5920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
5921 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
5922 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
5923 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
5924 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
5925 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5926
5927 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
5928 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5929
5930 void bnxt_fill_ipv6_mask(__be32 mask[4])
5931 {
5932 int i;
5933
5934 for (i = 0; i < 4; i++)
5935 mask[i] = cpu_to_be32(~0);
5936 }
5937
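/* Steer an ntuple filter to its destination: a VNIC belonging to a
 * user RSS context, the dedicated ntuple VNIC with an RFS ring table
 * index, or (on older firmware) the RX ring itself as the dst_id.
 */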
5938 static void
5939 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
5940 struct hwrm_cfa_ntuple_filter_alloc_input *req,
5941 struct bnxt_ntuple_filter *fltr)
5942 {
5943 u16 rxq = fltr->base.rxq;
5944
5945 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
5946 struct ethtool_rxfh_context *ctx;
5947 struct bnxt_rss_ctx *rss_ctx;
5948 struct bnxt_vnic_info *vnic;
5949
5950 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
5951 fltr->base.fw_vnic_id);
5952 if (ctx) {
5953 rss_ctx = ethtool_rxfh_context_priv(ctx);
5954 vnic = &rss_ctx->vnic;
5955
5956 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5957 }
5958 return;
5959 }
5960 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
5961 struct bnxt_vnic_info *vnic;
5962 u32 enables;
5963
5964 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
5965 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5966 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
5967 req->enables |= cpu_to_le32(enables);
5968 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
5969 } else {
5970 u32 flags;
5971
5972 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5973 req->flags |= cpu_to_le32(flags);
5974 req->dst_id = cpu_to_le16(rxq);
5975 }
5976 }
5977
5978 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5979 struct bnxt_ntuple_filter *fltr)
5980 {
5981 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5982 struct hwrm_cfa_ntuple_filter_alloc_input *req;
5983 struct bnxt_flow_masks *masks = &fltr->fmasks;
5984 struct flow_keys *keys = &fltr->fkeys;
5985 struct bnxt_l2_filter *l2_fltr;
5986 struct bnxt_vnic_info *vnic;
5987 int rc;
5988
5989 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5990 if (rc)
5991 return rc;
5992
5993 l2_fltr = fltr->l2_fltr;
5994 req->l2_filter_id = l2_fltr->base.filter_id;
5995
5996 if (fltr->base.flags & BNXT_ACT_DROP) {
5997 req->flags =
5998 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
5999 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6000 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6001 } else {
6002 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6003 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6004 }
6005 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6006
6007 req->ethertype = htons(ETH_P_IP);
6008 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6009 req->ip_protocol = keys->basic.ip_proto;
6010
6011 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6012 req->ethertype = htons(ETH_P_IPV6);
6013 req->ip_addr_type =
6014 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6015 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6016 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6017 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6018 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6019 } else {
6020 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6021 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6022 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6023 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6024 }
6025 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6026 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6027 req->tunnel_type =
6028 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6029 }
6030
6031 req->src_port = keys->ports.src;
6032 req->src_port_mask = masks->ports.src;
6033 req->dst_port = keys->ports.dst;
6034 req->dst_port_mask = masks->ports.dst;
6035
6036 resp = hwrm_req_hold(bp, req);
6037 rc = hwrm_req_send(bp, req);
6038 if (!rc)
6039 fltr->base.filter_id = resp->ntuple_filter_id;
6040 hwrm_req_drop(bp, req);
6041 return rc;
6042 }
6043
6044 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6045 const u8 *mac_addr)
6046 {
6047 struct bnxt_l2_filter *fltr;
6048 struct bnxt_l2_key key;
6049 int rc;
6050
6051 ether_addr_copy(key.dst_mac_addr, mac_addr);
6052 key.vlan = 0;
6053 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6054 if (IS_ERR(fltr))
6055 return PTR_ERR(fltr);
6056
6057 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6058 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6059 if (rc)
6060 bnxt_del_l2_filter(bp, fltr);
6061 else
6062 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6063 return rc;
6064 }
6065
6066 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6067 {
6068 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6069
6070 /* Any associated ntuple filters will also be cleared by firmware. */
6071 for (i = 0; i < num_of_vnics; i++) {
6072 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6073
6074 for (j = 0; j < vnic->uc_filter_count; j++) {
6075 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6076
6077 bnxt_hwrm_l2_filter_free(bp, fltr);
6078 bnxt_del_l2_filter(bp, fltr);
6079 }
6080 vnic->uc_filter_count = 0;
6081 }
6082 }
6083
6084 #define BNXT_DFLT_TUNL_TPA_BMAP \
6085 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6086 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6087 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6088
6089 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6090 struct hwrm_vnic_tpa_cfg_input *req)
6091 {
6092 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6093
6094 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6095 return;
6096
6097 if (bp->vxlan_port)
6098 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6099 if (bp->vxlan_gpe_port)
6100 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6101 if (bp->nge_port)
6102 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6103
6104 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6105 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6106 }
6107
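/* Configure TPA (hardware GRO/LRO aggregation) on a VNIC, or disable
 * it when tpa_flags is 0. Aggregation limits are derived from the MTU
 * and the RX page size; see the worked example below.
 */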
6108 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6109 u32 tpa_flags)
6110 {
6111 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6112 struct hwrm_vnic_tpa_cfg_input *req;
6113 int rc;
6114
6115 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6116 return 0;
6117
6118 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6119 if (rc)
6120 return rc;
6121
6122 if (tpa_flags) {
6123 u16 mss = bp->dev->mtu - 40;
6124 u32 nsegs, n, segs = 0, flags;
6125
6126 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6127 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6128 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6129 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6130 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6131 if (tpa_flags & BNXT_FLAG_GRO)
6132 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6133
6134 req->flags = cpu_to_le32(flags);
6135
6136 req->enables =
6137 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6138 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6139 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6140
6141 /* The number of aggregation segments is in log2 units, and the
6142 * first packet is not counted in these units.
6143 */
6144 if (mss <= BNXT_RX_PAGE_SIZE) {
6145 n = BNXT_RX_PAGE_SIZE / mss;
6146 nsegs = (MAX_SKB_FRAGS - 1) * n;
6147 } else {
6148 n = mss / BNXT_RX_PAGE_SIZE;
6149 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6150 n++;
6151 nsegs = (MAX_SKB_FRAGS - n) / n;
6152 }
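/* Worked example (values are illustrative): with a 1500-byte MTU,
 * mss = 1460 and, assuming a 4K BNXT_RX_PAGE_SIZE, mss <= page
 * size, so n = 4096 / 1460 = 2 full segments per RX page and
 * nsegs = (MAX_SKB_FRAGS - 1) * 2.
 */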
6153
6154 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6155 segs = MAX_TPA_SEGS_P5;
6156 max_aggs = bp->max_tpa;
6157 } else {
6158 segs = ilog2(nsegs);
6159 }
6160 req->max_agg_segs = cpu_to_le16(segs);
6161 req->max_aggs = cpu_to_le16(max_aggs);
6162
6163 req->min_agg_len = cpu_to_le32(512);
6164 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6165 }
6166 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6167
6168 return hwrm_req_send(bp, req);
6169 }
6170
6171 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6172 {
6173 struct bnxt_ring_grp_info *grp_info;
6174
6175 grp_info = &bp->grp_info[ring->grp_idx];
6176 return grp_info->cp_fw_ring_id;
6177 }
6178
6179 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6180 {
6181 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6182 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6183 else
6184 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6185 }
6186
6187 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6188 {
6189 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6190 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6191 else
6192 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6193 }
6194
6195 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6196 {
6197 int entries;
6198
6199 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6200 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6201 else
6202 entries = HW_HASH_INDEX_SIZE;
6203
6204 bp->rss_indir_tbl_entries = entries;
6205 bp->rss_indir_tbl =
6206 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6207 if (!bp->rss_indir_tbl)
6208 return -ENOMEM;
6209
6210 return 0;
6211 }
6212
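/* Fill the RSS indirection table (the driver's own or a user RSS
 * context's) with the default round-robin spread across the RX rings,
 * zero-padding any entries beyond what ethtool exposes.
 */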
6213 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6214 struct ethtool_rxfh_context *rss_ctx)
6215 {
6216 u16 max_rings, max_entries, pad, i;
6217 u32 *rss_indir_tbl;
6218
6219 if (!bp->rx_nr_rings)
6220 return;
6221
6222 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6223 max_rings = bp->rx_nr_rings - 1;
6224 else
6225 max_rings = bp->rx_nr_rings;
6226
6227 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6228 if (rss_ctx)
6229 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6230 else
6231 rss_indir_tbl = &bp->rss_indir_tbl[0];
6232
6233 for (i = 0; i < max_entries; i++)
6234 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6235
6236 pad = bp->rss_indir_tbl_entries - max_entries;
6237 if (pad)
6238 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6239 }
6240
6241 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6242 {
6243 u32 i, tbl_size, max_ring = 0;
6244
6245 if (!bp->rss_indir_tbl)
6246 return 0;
6247
6248 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6249 for (i = 0; i < tbl_size; i++)
6250 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6251 return max_ring;
6252 }
6253
6254 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6255 {
6256 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6257 if (!rx_rings)
6258 return 0;
6259 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6260 BNXT_RSS_TABLE_ENTRIES_P5);
6261 }
6262 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6263 return 2;
6264 return 1;
6265 }
6266
6267 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6268 {
6269 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6270 u16 i, j;
6271
6272 /* Fill the RSS indirection table with ring group ids */
6273 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6274 if (!no_rss)
6275 j = bp->rss_indir_tbl[i];
6276 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6277 }
6278 }
6279
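/* P5+ chips take the RSS table as (RX ring ID, completion ring ID)
 * pairs rather than ring group IDs.
 */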
6280 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6281 struct bnxt_vnic_info *vnic)
6282 {
6283 __le16 *ring_tbl = vnic->rss_table;
6284 struct bnxt_rx_ring_info *rxr;
6285 u16 tbl_size, i;
6286
6287 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6288
6289 for (i = 0; i < tbl_size; i++) {
6290 u16 ring_id, j;
6291
6292 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6293 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6294 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6295 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6296 else
6297 j = bp->rss_indir_tbl[i];
6298 rxr = &bp->rx_ring[j];
6299
6300 ring_id = rxr->rx_ring_struct.fw_ring_id;
6301 *ring_tbl++ = cpu_to_le16(ring_id);
6302 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6303 *ring_tbl++ = cpu_to_le16(ring_id);
6304 }
6305 }
6306
6307 static void
6308 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6309 struct bnxt_vnic_info *vnic)
6310 {
6311 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6312 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6313 if (bp->flags & BNXT_FLAG_CHIP_P7)
6314 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6315 } else {
6316 bnxt_fill_hw_rss_tbl(bp, vnic);
6317 }
6318
6319 if (bp->rss_hash_delta) {
6320 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6321 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6322 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6323 else
6324 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6325 } else {
6326 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6327 }
6328 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6329 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6330 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6331 }
6332
6333 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6334 bool set_rss)
6335 {
6336 struct hwrm_vnic_rss_cfg_input *req;
6337 int rc;
6338
6339 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6340 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6341 return 0;
6342
6343 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6344 if (rc)
6345 return rc;
6346
6347 if (set_rss)
6348 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6349 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6350 return hwrm_req_send(bp, req);
6351 }
6352
6353 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6354 struct bnxt_vnic_info *vnic, bool set_rss)
6355 {
6356 struct hwrm_vnic_rss_cfg_input *req;
6357 dma_addr_t ring_tbl_map;
6358 u32 i, nr_ctxs;
6359 int rc;
6360
6361 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6362 if (rc)
6363 return rc;
6364
6365 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6366 if (!set_rss)
6367 return hwrm_req_send(bp, req);
6368
6369 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6370 ring_tbl_map = vnic->rss_table_dma_addr;
6371 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6372
6373 hwrm_req_hold(bp, req);
6374 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6375 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6376 req->ring_table_pair_index = i;
6377 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6378 rc = hwrm_req_send(bp, req);
6379 if (rc)
6380 goto exit;
6381 }
6382
6383 exit:
6384 hwrm_req_drop(bp, req);
6385 return rc;
6386 }
6387
6388 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6389 {
6390 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6391 struct hwrm_vnic_rss_qcfg_output *resp;
6392 struct hwrm_vnic_rss_qcfg_input *req;
6393
6394 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6395 return;
6396
6397 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6398 /* All contexts are configured to the same hash_type; context zero always exists. */
6399 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6400 resp = hwrm_req_hold(bp, req);
6401 if (!hwrm_req_send(bp, req)) {
6402 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6403 bp->rss_hash_delta = 0;
6404 }
6405 hwrm_req_drop(bp, req);
6406 }
6407
6408 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6409 {
6410 struct hwrm_vnic_plcmodes_cfg_input *req;
6411 int rc;
6412
6413 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6414 if (rc)
6415 return rc;
6416
6417 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6418 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6419
6420 if (BNXT_RX_PAGE_MODE(bp)) {
6421 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6422 } else {
6423 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6424 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6425 req->enables |=
6426 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6427 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6428 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6429 }
6430 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6431 return hwrm_req_send(bp, req);
6432 }
6433
6434 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6435 struct bnxt_vnic_info *vnic,
6436 u16 ctx_idx)
6437 {
6438 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6439
6440 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6441 return;
6442
6443 req->rss_cos_lb_ctx_id =
6444 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6445
6446 hwrm_req_send(bp, req);
6447 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6448 }
6449
6450 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6451 {
6452 int i, j;
6453
6454 for (i = 0; i < bp->nr_vnics; i++) {
6455 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6456
6457 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6458 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6459 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6460 }
6461 }
6462 bp->rsscos_nr_ctxs = 0;
6463 }
6464
6465 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6466 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6467 {
6468 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6469 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6470 int rc;
6471
6472 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6473 if (rc)
6474 return rc;
6475
6476 resp = hwrm_req_hold(bp, req);
6477 rc = hwrm_req_send(bp, req);
6478 if (!rc)
6479 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6480 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6481 hwrm_req_drop(bp, req);
6482
6483 return rc;
6484 }
6485
6486 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6487 {
6488 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6489 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6490 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6491 }
6492
6493 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6494 {
6495 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6496 struct hwrm_vnic_cfg_input *req;
6497 unsigned int ring = 0, grp_idx;
6498 u16 def_vlan = 0;
6499 int rc;
6500
6501 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6502 if (rc)
6503 return rc;
6504
6505 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6506 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6507
6508 req->default_rx_ring_id =
6509 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6510 req->default_cmpl_ring_id =
6511 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6512 req->enables =
6513 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6514 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6515 goto vnic_mru;
6516 }
6517 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6518 /* Only RSS is supported for now. TBD: COS & LB */
6519 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6520 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6521 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6522 VNIC_CFG_REQ_ENABLES_MRU);
6523 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6524 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6525 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6526 VNIC_CFG_REQ_ENABLES_MRU);
6527 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6528 } else {
6529 req->rss_rule = cpu_to_le16(0xffff);
6530 }
6531
6532 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6533 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6534 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6535 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6536 } else {
6537 req->cos_rule = cpu_to_le16(0xffff);
6538 }
6539
6540 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6541 ring = 0;
6542 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6543 ring = vnic->vnic_id - 1;
6544 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6545 ring = bp->rx_nr_rings - 1;
6546
6547 grp_idx = bp->rx_ring[ring].bnapi->index;
6548 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6549 req->lb_rule = cpu_to_le16(0xffff);
6550 vnic_mru:
6551 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
6552 req->mru = cpu_to_le16(vnic->mru);
6553
6554 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6555 #ifdef CONFIG_BNXT_SRIOV
6556 if (BNXT_VF(bp))
6557 def_vlan = bp->vf.vlan;
6558 #endif
6559 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6560 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6561 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6562 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6563
6564 return hwrm_req_send(bp, req);
6565 }
6566
6567 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6568 struct bnxt_vnic_info *vnic)
6569 {
6570 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6571 struct hwrm_vnic_free_input *req;
6572
6573 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6574 return;
6575
6576 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6577
6578 hwrm_req_send(bp, req);
6579 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6580 }
6581 }
6582
6583 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6584 {
6585 u16 i;
6586
6587 for (i = 0; i < bp->nr_vnics; i++)
6588 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6589 }
6590
6591 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6592 unsigned int start_rx_ring_idx,
6593 unsigned int nr_rings)
6594 {
6595 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6596 struct hwrm_vnic_alloc_output *resp;
6597 struct hwrm_vnic_alloc_input *req;
6598 int rc;
6599
6600 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6601 if (rc)
6602 return rc;
6603
6604 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6605 goto vnic_no_ring_grps;
6606
6607 /* map ring groups to this vnic */
6608 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6609 grp_idx = bp->rx_ring[i].bnapi->index;
6610 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6611 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6612 j, nr_rings);
6613 break;
6614 }
6615 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6616 }
6617
6618 vnic_no_ring_grps:
6619 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6620 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6621 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6622 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6623
6624 resp = hwrm_req_hold(bp, req);
6625 rc = hwrm_req_send(bp, req);
6626 if (!rc)
6627 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6628 hwrm_req_drop(bp, req);
6629 return rc;
6630 }
6631
6632 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6633 {
6634 struct hwrm_vnic_qcaps_output *resp;
6635 struct hwrm_vnic_qcaps_input *req;
6636 int rc;
6637
6638 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6639 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6640 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6641 if (bp->hwrm_spec_code < 0x10600)
6642 return 0;
6643
6644 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6645 if (rc)
6646 return rc;
6647
6648 resp = hwrm_req_hold(bp, req);
6649 rc = hwrm_req_send(bp, req);
6650 if (!rc) {
6651 u32 flags = le32_to_cpu(resp->flags);
6652
6653 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6654 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6655 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6656 if (flags &
6657 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6658 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6659
6660 /* Older P5 fw before EXT_HW_STATS support did not set
6661 * VLAN_STRIP_CAP properly.
6662 */
6663 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6664 (BNXT_CHIP_P5(bp) &&
6665 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6666 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6667 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6668 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6669 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6670 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6671 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6672 if (bp->max_tpa_v2) {
6673 if (BNXT_CHIP_P5(bp))
6674 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6675 else
6676 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6677 }
6678 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6679 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6680 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6681 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6682 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6683 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6684 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6685 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6686 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6687 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6688 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6689 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6690 }
6691 hwrm_req_drop(bp, req);
6692 return rc;
6693 }
6694
6695 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6696 {
6697 struct hwrm_ring_grp_alloc_output *resp;
6698 struct hwrm_ring_grp_alloc_input *req;
6699 int rc;
6700 u16 i;
6701
6702 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6703 return 0;
6704
6705 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6706 if (rc)
6707 return rc;
6708
6709 resp = hwrm_req_hold(bp, req);
6710 for (i = 0; i < bp->rx_nr_rings; i++) {
6711 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6712
6713 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6714 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6715 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6716 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6717
6718 rc = hwrm_req_send(bp, req);
6719
6720 if (rc)
6721 break;
6722
6723 bp->grp_info[grp_idx].fw_grp_id =
6724 le32_to_cpu(resp->ring_group_id);
6725 }
6726 hwrm_req_drop(bp, req);
6727 return rc;
6728 }
6729
6730 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6731 {
6732 struct hwrm_ring_grp_free_input *req;
6733 u16 i;
6734
6735 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6736 return;
6737
6738 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6739 return;
6740
6741 hwrm_req_hold(bp, req);
6742 for (i = 0; i < bp->cp_nr_rings; i++) {
6743 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6744 continue;
6745 req->ring_group_id =
6746 cpu_to_le32(bp->grp_info[i].fw_grp_id);
6747
6748 hwrm_req_send(bp, req);
6749 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6750 }
6751 hwrm_req_drop(bp, req);
6752 }
6753
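/* Common HWRM_RING_ALLOC helper for every ring type (TX, RX, AGG,
 * CMPL, NQ). Fills in the type-specific associations (completion
 * ring, stats context, NQ, etc) and returns the firmware ring ID in
 * ring->fw_ring_id.
 */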
6754 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6755 struct bnxt_ring_struct *ring,
6756 u32 ring_type, u32 map_index)
6757 {
6758 struct hwrm_ring_alloc_output *resp;
6759 struct hwrm_ring_alloc_input *req;
6760 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6761 struct bnxt_ring_grp_info *grp_info;
6762 int rc, err = 0;
6763 u16 ring_id;
6764
6765 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6766 if (rc)
6767 goto exit;
6768
6769 req->enables = 0;
6770 if (rmem->nr_pages > 1) {
6771 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6772 /* Page size is in log2 units */
6773 req->page_size = BNXT_PAGE_SHIFT;
6774 req->page_tbl_depth = 1;
6775 } else {
6776 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
6777 }
6778 req->fbo = 0;
6779 /* Association of ring index with doorbell index and MSIX number */
6780 req->logical_id = cpu_to_le16(map_index);
6781
6782 switch (ring_type) {
6783 case HWRM_RING_ALLOC_TX: {
6784 struct bnxt_tx_ring_info *txr;
6785 u16 flags = 0;
6786
6787 txr = container_of(ring, struct bnxt_tx_ring_info,
6788 tx_ring_struct);
6789 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6790 /* Association of transmit ring with completion ring */
6791 grp_info = &bp->grp_info[ring->grp_idx];
6792 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6793 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6794 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6795 req->queue_id = cpu_to_le16(ring->queue_id);
6796 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6797 req->cmpl_coal_cnt =
6798 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6799 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
6800 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
6801 req->flags = cpu_to_le16(flags);
6802 break;
6803 }
6804 case HWRM_RING_ALLOC_RX:
6805 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6806 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6807 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6808 u16 flags = 0;
6809
6810 /* Association of rx ring with stats context */
6811 grp_info = &bp->grp_info[ring->grp_idx];
6812 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6813 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6814 req->enables |= cpu_to_le32(
6815 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6816 if (NET_IP_ALIGN == 2)
6817 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
6818 req->flags = cpu_to_le16(flags);
6819 }
6820 break;
6821 case HWRM_RING_ALLOC_AGG:
6822 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6823 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
6824 /* Association of agg ring with rx ring */
6825 grp_info = &bp->grp_info[ring->grp_idx];
6826 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
6827 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
6828 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6829 req->enables |= cpu_to_le32(
6830 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
6831 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6832 } else {
6833 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6834 }
6835 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6836 break;
6837 case HWRM_RING_ALLOC_CMPL:
6838 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
6839 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6840 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6841 /* Association of cp ring with nq */
6842 grp_info = &bp->grp_info[map_index];
6843 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
6844 req->cq_handle = cpu_to_le64(ring->handle);
6845 req->enables |= cpu_to_le32(
6846 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
6847 } else {
6848 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6849 }
6850 break;
6851 case HWRM_RING_ALLOC_NQ:
6852 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
6853 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6854 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6855 break;
6856 default:
6857 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6858 ring_type);
6859 return -1;
6860 }
6861
6862 resp = hwrm_req_hold(bp, req);
6863 rc = hwrm_req_send(bp, req);
6864 err = le16_to_cpu(resp->error_code);
6865 ring_id = le16_to_cpu(resp->ring_id);
6866 hwrm_req_drop(bp, req);
6867
6868 exit:
6869 if (rc || err) {
6870 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6871 ring_type, rc, err);
6872 return -EIO;
6873 }
6874 ring->fw_ring_id = ring_id;
6875 return rc;
6876 }
6877
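/* Point firmware async event notifications at the given completion
 * ring: the PF uses HWRM_FUNC_CFG, a VF uses HWRM_FUNC_VF_CFG.
 */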
6878 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6879 {
6880 int rc;
6881
6882 if (BNXT_PF(bp)) {
6883 struct hwrm_func_cfg_input *req;
6884
6885 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6886 if (rc)
6887 return rc;
6888
6889 req->fid = cpu_to_le16(0xffff);
6890 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6891 req->async_event_cr = cpu_to_le16(idx);
6892 return hwrm_req_send(bp, req);
6893 } else {
6894 struct hwrm_func_vf_cfg_input *req;
6895
6896 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6897 if (rc)
6898 return rc;
6899
6900 req->enables =
6901 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6902 req->async_event_cr = cpu_to_le16(idx);
6903 return hwrm_req_send(bp, req);
6904 }
6905 }
6906
6907 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6908 u32 ring_type)
6909 {
6910 switch (ring_type) {
6911 case HWRM_RING_ALLOC_TX:
6912 db->db_ring_mask = bp->tx_ring_mask;
6913 break;
6914 case HWRM_RING_ALLOC_RX:
6915 db->db_ring_mask = bp->rx_ring_mask;
6916 break;
6917 case HWRM_RING_ALLOC_AGG:
6918 db->db_ring_mask = bp->rx_agg_ring_mask;
6919 break;
6920 case HWRM_RING_ALLOC_CMPL:
6921 case HWRM_RING_ALLOC_NQ:
6922 db->db_ring_mask = bp->cp_ring_mask;
6923 break;
6924 }
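/* P7 doorbells carry an epoch bit that toggles each time the ring
 * index wraps; derive its mask and shift from the ring size.
 */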
6925 if (bp->flags & BNXT_FLAG_CHIP_P7) {
6926 db->db_epoch_mask = db->db_ring_mask + 1;
6927 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
6928 }
6929 }
6930
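/* Set up the doorbell for one ring. P5+ chips use a single 64-bit
 * doorbell region at db_offset, with the ring type, XID and (on P7)
 * a valid bit encoded in the key; older chips use a 32-bit doorbell
 * at a fixed 0x80 stride per ring map index.
 */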
6931 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6932 u32 map_idx, u32 xid)
6933 {
6934 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6935 switch (ring_type) {
6936 case HWRM_RING_ALLOC_TX:
6937 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
6938 break;
6939 case HWRM_RING_ALLOC_RX:
6940 case HWRM_RING_ALLOC_AGG:
6941 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
6942 break;
6943 case HWRM_RING_ALLOC_CMPL:
6944 db->db_key64 = DBR_PATH_L2;
6945 break;
6946 case HWRM_RING_ALLOC_NQ:
6947 db->db_key64 = DBR_PATH_L2;
6948 break;
6949 }
6950 db->db_key64 |= (u64)xid << DBR_XID_SFT;
6951
6952 if (bp->flags & BNXT_FLAG_CHIP_P7)
6953 db->db_key64 |= DBR_VALID;
6954
6955 db->doorbell = bp->bar1 + bp->db_offset;
6956 } else {
6957 db->doorbell = bp->bar1 + map_idx * 0x80;
6958 switch (ring_type) {
6959 case HWRM_RING_ALLOC_TX:
6960 db->db_key32 = DB_KEY_TX;
6961 break;
6962 case HWRM_RING_ALLOC_RX:
6963 case HWRM_RING_ALLOC_AGG:
6964 db->db_key32 = DB_KEY_RX;
6965 break;
6966 case HWRM_RING_ALLOC_CMPL:
6967 db->db_key32 = DB_KEY_CP;
6968 break;
6969 }
6970 }
6971 bnxt_set_db_mask(bp, db, ring_type);
6972 }
6973
6974 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
6975 struct bnxt_rx_ring_info *rxr)
6976 {
6977 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6978 struct bnxt_napi *bnapi = rxr->bnapi;
6979 u32 type = HWRM_RING_ALLOC_RX;
6980 u32 map_idx = bnapi->index;
6981 int rc;
6982
6983 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6984 if (rc)
6985 return rc;
6986
6987 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6988 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6989
6990 return 0;
6991 }
6992
6993 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
6994 struct bnxt_rx_ring_info *rxr)
6995 {
6996 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6997 u32 type = HWRM_RING_ALLOC_AGG;
6998 u32 grp_idx = ring->grp_idx;
6999 u32 map_idx;
7000 int rc;
7001
7002 map_idx = grp_idx + bp->rx_nr_rings;
7003 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7004 if (rc)
7005 return rc;
7006
7007 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7008 ring->fw_ring_id);
7009 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7010 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7011 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7012
7013 return 0;
7014 }
7015
7016 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7017 {
7018 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7019 int i, rc = 0;
7020 u32 type;
7021
7022 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7023 type = HWRM_RING_ALLOC_NQ;
7024 else
7025 type = HWRM_RING_ALLOC_CMPL;
7026 for (i = 0; i < bp->cp_nr_rings; i++) {
7027 struct bnxt_napi *bnapi = bp->bnapi[i];
7028 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7029 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7030 u32 map_idx = ring->map_idx;
7031 unsigned int vector;
7032
7033 vector = bp->irq_tbl[map_idx].vector;
7034 disable_irq_nosync(vector);
7035 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7036 if (rc) {
7037 enable_irq(vector);
7038 goto err_out;
7039 }
7040 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7041 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7042 enable_irq(vector);
7043 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7044
7045 if (!i) {
7046 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7047 if (rc)
7048 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7049 }
7050 }
7051
7052 type = HWRM_RING_ALLOC_TX;
7053 for (i = 0; i < bp->tx_nr_rings; i++) {
7054 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7055 struct bnxt_ring_struct *ring;
7056 u32 map_idx;
7057
7058 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7059 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
7060 struct bnxt_napi *bnapi = txr->bnapi;
7061 u32 type2 = HWRM_RING_ALLOC_CMPL;
7062
7063 ring = &cpr2->cp_ring_struct;
7064 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7065 map_idx = bnapi->index;
7066 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7067 if (rc)
7068 goto err_out;
7069 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7070 ring->fw_ring_id);
7071 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7072 }
7073 ring = &txr->tx_ring_struct;
7074 map_idx = i;
7075 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7076 if (rc)
7077 goto err_out;
7078 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
7079 }
7080
7081 for (i = 0; i < bp->rx_nr_rings; i++) {
7082 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7083
7084 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7085 if (rc)
7086 goto err_out;
7087 /* If we have agg rings, post agg buffers first. */
7088 if (!agg_rings)
7089 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7090 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7091 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
7092 struct bnxt_napi *bnapi = rxr->bnapi;
7093 u32 type2 = HWRM_RING_ALLOC_CMPL;
7094 struct bnxt_ring_struct *ring;
7095 u32 map_idx = bnapi->index;
7096
7097 ring = &cpr2->cp_ring_struct;
7098 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7099 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7100 if (rc)
7101 goto err_out;
7102 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7103 ring->fw_ring_id);
7104 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7105 }
7106 }
7107
7108 if (agg_rings) {
7109 for (i = 0; i < bp->rx_nr_rings; i++) {
7110 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7111 if (rc)
7112 goto err_out;
7113 }
7114 }
7115 err_out:
7116 return rc;
7117 }
7118
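/* Send HWRM_RING_FREE for one ring. Returns 0 when firmware access is
 * disabled, and -EIO if either the request fails or firmware reports a
 * non-zero error code in the response.
 */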
7119 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7120 struct bnxt_ring_struct *ring,
7121 u32 ring_type, int cmpl_ring_id)
7122 {
7123 struct hwrm_ring_free_output *resp;
7124 struct hwrm_ring_free_input *req;
7125 u16 error_code = 0;
7126 int rc;
7127
7128 if (BNXT_NO_FW_ACCESS(bp))
7129 return 0;
7130
7131 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7132 if (rc)
7133 goto exit;
7134
7135 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7136 req->ring_type = ring_type;
7137 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7138
7139 resp = hwrm_req_hold(bp, req);
7140 rc = hwrm_req_send(bp, req);
7141 error_code = le16_to_cpu(resp->error_code);
7142 hwrm_req_drop(bp, req);
7143 exit:
7144 if (rc || error_code) {
7145 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7146 ring_type, rc, error_code);
7147 return -EIO;
7148 }
7149 return 0;
7150 }
7151
7152 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7153 struct bnxt_rx_ring_info *rxr,
7154 bool close_path)
7155 {
7156 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7157 u32 grp_idx = rxr->bnapi->index;
7158 u32 cmpl_ring_id;
7159
7160 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7161 return;
7162
7163 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7164 hwrm_ring_free_send_msg(bp, ring,
7165 RING_FREE_REQ_RING_TYPE_RX,
7166 close_path ? cmpl_ring_id :
7167 INVALID_HW_RING_ID);
7168 ring->fw_ring_id = INVALID_HW_RING_ID;
7169 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7170 }
7171
7172 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7173 struct bnxt_rx_ring_info *rxr,
7174 bool close_path)
7175 {
7176 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7177 u32 grp_idx = rxr->bnapi->index;
7178 u32 type, cmpl_ring_id;
7179
7180 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7181 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7182 else
7183 type = RING_FREE_REQ_RING_TYPE_RX;
7184
7185 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7186 return;
7187
7188 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7189 hwrm_ring_free_send_msg(bp, ring, type,
7190 close_path ? cmpl_ring_id :
7191 INVALID_HW_RING_ID);
7192 ring->fw_ring_id = INVALID_HW_RING_ID;
7193 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7194 }
7195
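/* Free all firmware rings in the reverse of the allocation order: TX
 * first, then RX and aggregation rings, and the completion/NQ rings
 * last, with interrupts disabled before the completion rings go away.
 */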
7196 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7197 {
7198 u32 type;
7199 int i;
7200
7201 if (!bp->bnapi)
7202 return;
7203
7204 for (i = 0; i < bp->tx_nr_rings; i++) {
7205 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7206 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7207
7208 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7209 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
7210
7211 hwrm_ring_free_send_msg(bp, ring,
7212 RING_FREE_REQ_RING_TYPE_TX,
7213 close_path ? cmpl_ring_id :
7214 INVALID_HW_RING_ID);
7215 ring->fw_ring_id = INVALID_HW_RING_ID;
7216 }
7217 }
7218
7219 for (i = 0; i < bp->rx_nr_rings; i++) {
7220 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7221 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7222 }
7223
7224 /* The completion rings are about to be freed. After that the
7225 * IRQ doorbell will not work anymore. So we need to disable
7226 * IRQ here.
7227 */
7228 bnxt_disable_int_sync(bp);
7229
7230 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7231 type = RING_FREE_REQ_RING_TYPE_NQ;
7232 else
7233 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7234 for (i = 0; i < bp->cp_nr_rings; i++) {
7235 struct bnxt_napi *bnapi = bp->bnapi[i];
7236 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7237 struct bnxt_ring_struct *ring;
7238 int j;
7239
7240 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7241 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7242
7243 ring = &cpr2->cp_ring_struct;
7244 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7245 continue;
7246 hwrm_ring_free_send_msg(bp, ring,
7247 RING_FREE_REQ_RING_TYPE_L2_CMPL,
7248 INVALID_HW_RING_ID);
7249 ring->fw_ring_id = INVALID_HW_RING_ID;
7250 }
7251 ring = &cpr->cp_ring_struct;
7252 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7253 hwrm_ring_free_send_msg(bp, ring, type,
7254 INVALID_HW_RING_ID);
7255 ring->fw_ring_id = INVALID_HW_RING_ID;
7256 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7257 }
7258 }
7259 }
7260
7261 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7262 bool shared);
7263 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7264 bool shared);
7265
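/* Query HWRM_FUNC_QCFG for the resources currently allocated to this
 * function and cache them in bp->hw_resc. On P5+ chips the cached
 * RX/TX counts may be trimmed to fit the allocated completion rings.
 */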
7266 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7267 {
7268 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7269 struct hwrm_func_qcfg_output *resp;
7270 struct hwrm_func_qcfg_input *req;
7271 int rc;
7272
7273 if (bp->hwrm_spec_code < 0x10601)
7274 return 0;
7275
7276 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7277 if (rc)
7278 return rc;
7279
7280 req->fid = cpu_to_le16(0xffff);
7281 resp = hwrm_req_hold(bp, req);
7282 rc = hwrm_req_send(bp, req);
7283 if (rc) {
7284 hwrm_req_drop(bp, req);
7285 return rc;
7286 }
7287
7288 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7289 if (BNXT_NEW_RM(bp)) {
7290 u16 cp, stats;
7291
7292 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7293 hw_resc->resv_hw_ring_grps =
7294 le32_to_cpu(resp->alloc_hw_ring_grps);
7295 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7296 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7297 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7298 stats = le16_to_cpu(resp->alloc_stat_ctx);
7299 hw_resc->resv_irqs = cp;
7300 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7301 int rx = hw_resc->resv_rx_rings;
7302 int tx = hw_resc->resv_tx_rings;
7303
7304 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7305 rx >>= 1;
7306 if (cp < (rx + tx)) {
7307 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7308 if (rc)
7309 goto get_rings_exit;
7310 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7311 rx <<= 1;
7312 hw_resc->resv_rx_rings = rx;
7313 hw_resc->resv_tx_rings = tx;
7314 }
7315 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7316 hw_resc->resv_hw_ring_grps = rx;
7317 }
7318 hw_resc->resv_cp_rings = cp;
7319 hw_resc->resv_stat_ctxs = stats;
7320 }
7321 get_rings_exit:
7322 hwrm_req_drop(bp, req);
7323 return rc;
7324 }
7325
7326 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7327 {
7328 struct hwrm_func_qcfg_output *resp;
7329 struct hwrm_func_qcfg_input *req;
7330 int rc;
7331
7332 if (bp->hwrm_spec_code < 0x10601)
7333 return 0;
7334
7335 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7336 if (rc)
7337 return rc;
7338
7339 req->fid = cpu_to_le16(fid);
7340 resp = hwrm_req_hold(bp, req);
7341 rc = hwrm_req_send(bp, req);
7342 if (!rc)
7343 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7344
7345 hwrm_req_drop(bp, req);
7346 return rc;
7347 }
7348
7349 static bool bnxt_rfs_supported(struct bnxt *bp);
7350
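/* Build, but do not send, a HWRM_FUNC_CFG request that reserves the
 * ring counts in hwr for the PF. An enables bit is set only for each
 * non-zero resource count; the caller sends the request.
 */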
7351 static struct hwrm_func_cfg_input *
7352 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7353 {
7354 struct hwrm_func_cfg_input *req;
7355 u32 enables = 0;
7356
7357 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7358 return NULL;
7359
7360 req->fid = cpu_to_le16(0xffff);
7361 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7362 req->num_tx_rings = cpu_to_le16(hwr->tx);
7363 if (BNXT_NEW_RM(bp)) {
7364 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7365 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7366 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7367 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7368 enables |= hwr->cp_p5 ?
7369 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7370 } else {
7371 enables |= hwr->cp ?
7372 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7373 enables |= hwr->grp ?
7374 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7375 }
7376 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7377 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7378 0;
7379 req->num_rx_rings = cpu_to_le16(hwr->rx);
7380 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7381 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7382 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7383 req->num_msix = cpu_to_le16(hwr->cp);
7384 } else {
7385 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7386 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7387 }
7388 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7389 req->num_vnics = cpu_to_le16(hwr->vnic);
7390 }
7391 req->enables = cpu_to_le32(enables);
7392 return req;
7393 }
7394
7395 static struct hwrm_func_vf_cfg_input *
7396 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7397 {
7398 struct hwrm_func_vf_cfg_input *req;
7399 u32 enables = 0;
7400
7401 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7402 return NULL;
7403
7404 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7405 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7406 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7407 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7408 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7409 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7410 enables |= hwr->cp_p5 ?
7411 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7412 } else {
7413 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7414 enables |= hwr->grp ?
7415 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7416 }
7417 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7418 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7419
7420 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7421 req->num_tx_rings = cpu_to_le16(hwr->tx);
7422 req->num_rx_rings = cpu_to_le16(hwr->rx);
7423 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7424 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7425 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7426 } else {
7427 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7428 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7429 }
7430 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7431 req->num_vnics = cpu_to_le16(hwr->vnic);
7432
7433 req->enables = cpu_to_le32(enables);
7434 return req;
7435 }
7436
7437 static int
7438 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7439 {
7440 struct hwrm_func_cfg_input *req;
7441 int rc;
7442
7443 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7444 if (!req)
7445 return -ENOMEM;
7446
7447 if (!req->enables) {
7448 hwrm_req_drop(bp, req);
7449 return 0;
7450 }
7451
7452 rc = hwrm_req_send(bp, req);
7453 if (rc)
7454 return rc;
7455
7456 if (bp->hwrm_spec_code < 0x10601)
7457 bp->hw_resc.resv_tx_rings = hwr->tx;
7458
7459 return bnxt_hwrm_get_rings(bp);
7460 }
7461
7462 static int
7463 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7464 {
7465 struct hwrm_func_vf_cfg_input *req;
7466 int rc;
7467
7468 if (!BNXT_NEW_RM(bp)) {
7469 bp->hw_resc.resv_tx_rings = hwr->tx;
7470 return 0;
7471 }
7472
7473 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7474 if (!req)
7475 return -ENOMEM;
7476
7477 rc = hwrm_req_send(bp, req);
7478 if (rc)
7479 return rc;
7480
7481 return bnxt_hwrm_get_rings(bp);
7482 }
7483
7484 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7485 {
7486 if (BNXT_PF(bp))
7487 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7488 else
7489 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7490 }
7491
7492 int bnxt_nq_rings_in_use(struct bnxt *bp)
7493 {
7494 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7495 }
7496
7497 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7498 {
7499 int cp;
7500
7501 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7502 return bnxt_nq_rings_in_use(bp);
7503
7504 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7505 return cp;
7506 }
7507
7508 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7509 {
7510 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7511 }
7512
7513 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7514 {
7515 if (!hwr->grp)
7516 return 0;
7517 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7518 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7519
7520 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7521 rss_ctx *= hwr->vnic;
7522 return rss_ctx;
7523 }
7524 if (BNXT_VF(bp))
7525 return BNXT_VF_MAX_RSS_CTX;
7526 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7527 return hwr->grp + 1;
7528 return 1;
7529 }
7530
7531 /* Check if a default RSS map needs to be set up. This function is only
7532 * used on older firmware that does not require reserving RX rings.
7533 */
7534 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7535 {
7536 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7537
7538 /* The RSS map is valid for RX rings set to resv_rx_rings */
7539 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7540 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7541 if (!netif_is_rxfh_configured(bp->dev))
7542 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7543 }
7544 }
7545
7546 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7547 {
7548 if (bp->flags & BNXT_FLAG_RFS) {
7549 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7550 return 2 + bp->num_rss_ctx;
7551 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7552 return rx_rings + 1;
7553 }
7554 return 1;
7555 }
7556
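/* Return true if the current ring counts no longer match what has been
 * reserved in firmware, i.e. a new reservation is needed.
 */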
7557 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7558 {
7559 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7560 int cp = bnxt_cp_rings_in_use(bp);
7561 int nq = bnxt_nq_rings_in_use(bp);
7562 int rx = bp->rx_nr_rings, stat;
7563 int vnic, grp = rx;
7564
7565 /* Old firmware does not need RX ring reservations but we still
7566 * need to set up a default RSS map when needed. With new firmware
7567 * we go through RX ring reservations first and then set up the
7568 * RSS map for the successfully reserved RX rings when needed.
7569 */
7570 if (!BNXT_NEW_RM(bp))
7571 bnxt_check_rss_tbl_no_rmgr(bp);
7572
7573 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7574 bp->hwrm_spec_code >= 0x10601)
7575 return true;
7576
7577 if (!BNXT_NEW_RM(bp))
7578 return false;
7579
7580 vnic = bnxt_get_total_vnics(bp, rx);
7581
7582 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7583 rx <<= 1;
7584 stat = bnxt_get_func_stat_ctxs(bp);
7585 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7586 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7587 (hw_resc->resv_hw_ring_grps != grp &&
7588 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7589 return true;
7590 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7591 hw_resc->resv_irqs != nq)
7592 return true;
7593 return false;
7594 }
7595
7596 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7597 {
7598 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7599
7600 hwr->tx = hw_resc->resv_tx_rings;
7601 if (BNXT_NEW_RM(bp)) {
7602 hwr->rx = hw_resc->resv_rx_rings;
7603 hwr->cp = hw_resc->resv_irqs;
7604 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7605 hwr->cp_p5 = hw_resc->resv_cp_rings;
7606 hwr->grp = hw_resc->resv_hw_ring_grps;
7607 hwr->vnic = hw_resc->resv_vnics;
7608 hwr->stat = hw_resc->resv_stat_ctxs;
7609 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7610 }
7611 }
7612
7613 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7614 {
7615 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7616 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7617 }
7618
7619 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7620
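/* Reserve all ring resources in firmware and trim the driver's ring
 * counts down to what was actually granted. If fewer than 2 RX rings
 * are granted while aggregation rings are enabled, aggregation (and
 * LRO) is turned off; the RSS indirection table is reset only when the
 * new RX count makes the configured table invalid.
 */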
7621 static int __bnxt_reserve_rings(struct bnxt *bp)
7622 {
7623 struct bnxt_hw_rings hwr = {0};
7624 int rx_rings, old_rx_rings, rc;
7625 int cp = bp->cp_nr_rings;
7626 int ulp_msix = 0;
7627 bool sh = false;
7628 int tx_cp;
7629
7630 if (!bnxt_need_reserve_rings(bp))
7631 return 0;
7632
7633 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7634 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7635 if (!ulp_msix)
7636 bnxt_set_ulp_stat_ctxs(bp, 0);
7637
7638 if (ulp_msix > bp->ulp_num_msix_want)
7639 ulp_msix = bp->ulp_num_msix_want;
7640 hwr.cp = cp + ulp_msix;
7641 } else {
7642 hwr.cp = bnxt_nq_rings_in_use(bp);
7643 }
7644
7645 hwr.tx = bp->tx_nr_rings;
7646 hwr.rx = bp->rx_nr_rings;
7647 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7648 sh = true;
7649 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7650 hwr.cp_p5 = hwr.rx + hwr.tx;
7651
7652 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7653
7654 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7655 hwr.rx <<= 1;
7656 hwr.grp = bp->rx_nr_rings;
7657 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7658 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7659 old_rx_rings = bp->hw_resc.resv_rx_rings;
7660
7661 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7662 if (rc)
7663 return rc;
7664
7665 bnxt_copy_reserved_rings(bp, &hwr);
7666
7667 rx_rings = hwr.rx;
7668 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7669 if (hwr.rx >= 2) {
7670 rx_rings = hwr.rx >> 1;
7671 } else {
7672 if (netif_running(bp->dev))
7673 return -ENOMEM;
7674
7675 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7676 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7677 bp->dev->hw_features &= ~NETIF_F_LRO;
7678 bp->dev->features &= ~NETIF_F_LRO;
7679 bnxt_set_ring_params(bp);
7680 }
7681 }
7682 rx_rings = min_t(int, rx_rings, hwr.grp);
7683 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7684 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7685 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7686 hwr.cp = min_t(int, hwr.cp, hwr.stat);
7687 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7688 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7689 hwr.rx = rx_rings << 1;
7690 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7691 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7692 bp->tx_nr_rings = hwr.tx;
7693
7694 /* If we cannot reserve all the RX rings, reset the RSS map only
7695 * if absolutely necessary
7696 */
7697 if (rx_rings != bp->rx_nr_rings) {
7698 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7699 rx_rings, bp->rx_nr_rings);
7700 if (netif_is_rxfh_configured(bp->dev) &&
7701 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7702 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7703 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7704 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7705 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7706 }
7707 }
7708 bp->rx_nr_rings = rx_rings;
7709 bp->cp_nr_rings = hwr.cp;
7710
7711 if (!bnxt_rings_ok(bp, &hwr))
7712 return -ENOMEM;
7713
7714 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
7715 !netif_is_rxfh_configured(bp->dev))
7716 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7717
7718 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
7719 int resv_msix, resv_ctx, ulp_ctxs;
7720 struct bnxt_hw_resc *hw_resc;
7721
7722 hw_resc = &bp->hw_resc;
7723 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
7724 ulp_msix = min_t(int, resv_msix, ulp_msix);
7725 bnxt_set_ulp_msix_num(bp, ulp_msix);
7726 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
7727 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
7728 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
7729 }
7730
7731 return rc;
7732 }
7733
7734 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7735 {
7736 struct hwrm_func_vf_cfg_input *req;
7737 u32 flags;
7738
7739 if (!BNXT_NEW_RM(bp))
7740 return 0;
7741
7742 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7743 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7744 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7745 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7746 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7747 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7748 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7749 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7750 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7751
7752 req->flags = cpu_to_le32(flags);
7753 return hwrm_req_send_silent(bp, req);
7754 }
7755
7756 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7757 {
7758 struct hwrm_func_cfg_input *req;
7759 u32 flags;
7760
7761 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7762 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7763 if (BNXT_NEW_RM(bp)) {
7764 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7765 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7766 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7767 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7768 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7769 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7770 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7771 else
7772 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7773 }
7774
7775 req->flags = cpu_to_le32(flags);
7776 return hwrm_req_send_silent(bp, req);
7777 }
7778
7779 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7780 {
7781 if (bp->hwrm_spec_code < 0x10801)
7782 return 0;
7783
7784 if (BNXT_PF(bp))
7785 return bnxt_hwrm_check_pf_rings(bp, hwr);
7786
7787 return bnxt_hwrm_check_vf_rings(bp, hwr);
7788 }
7789
7790 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7791 {
7792 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7793 struct hwrm_ring_aggint_qcaps_output *resp;
7794 struct hwrm_ring_aggint_qcaps_input *req;
7795 int rc;
7796
7797 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
7798 coal_cap->num_cmpl_dma_aggr_max = 63;
7799 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
7800 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
7801 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
7802 coal_cap->int_lat_tmr_min_max = 65535;
7803 coal_cap->int_lat_tmr_max_max = 65535;
7804 coal_cap->num_cmpl_aggr_int_max = 65535;
7805 coal_cap->timer_units = 80;
7806
7807 if (bp->hwrm_spec_code < 0x10902)
7808 return;
7809
7810 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7811 return;
7812
7813 resp = hwrm_req_hold(bp, req);
7814 rc = hwrm_req_send_silent(bp, req);
7815 if (!rc) {
7816 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
7817 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
7818 coal_cap->num_cmpl_dma_aggr_max =
7819 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
7820 coal_cap->num_cmpl_dma_aggr_during_int_max =
7821 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
7822 coal_cap->cmpl_aggr_dma_tmr_max =
7823 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
7824 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
7825 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
7826 coal_cap->int_lat_tmr_min_max =
7827 le16_to_cpu(resp->int_lat_tmr_min_max);
7828 coal_cap->int_lat_tmr_max_max =
7829 le16_to_cpu(resp->int_lat_tmr_max_max);
7830 coal_cap->num_cmpl_aggr_int_max =
7831 le16_to_cpu(resp->num_cmpl_aggr_int_max);
7832 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
7833 }
7834 hwrm_req_drop(bp, req);
7835 }
7836
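/* Convert microseconds to hardware coalescing timer ticks; timer_units
 * is the tick period in nanoseconds.
 */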
7837 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
7838 {
7839 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7840
7841 return usec * 1000 / coal_cap->timer_units;
7842 }
7843
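/* Translate the coalescing parameters in hw_coal into an aggint
 * request, clamping each value to the queried capabilities. The min
 * timer is derived as 1/2 and the DMA buffer timer as 1/4 of the
 * interrupt timer.
 */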
7844 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7845 struct bnxt_coal *hw_coal,
7846 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7847 {
7848 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7849 u16 val, tmr, max, flags = hw_coal->flags;
7850 u32 cmpl_params = coal_cap->cmpl_params;
7851
7852 max = hw_coal->bufs_per_record * 128;
7853 if (hw_coal->budget)
7854 max = hw_coal->bufs_per_record * hw_coal->budget;
7855 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
7856
7857 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
7858 req->num_cmpl_aggr_int = cpu_to_le16(val);
7859
7860 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
7861 req->num_cmpl_dma_aggr = cpu_to_le16(val);
7862
7863 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
7864 coal_cap->num_cmpl_dma_aggr_during_int_max);
7865 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
7866
7867 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7868 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
7869 req->int_lat_tmr_max = cpu_to_le16(tmr);
7870
7871 /* min timer set to 1/2 of interrupt timer */
7872 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
7873 val = tmr / 2;
7874 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
7875 req->int_lat_tmr_min = cpu_to_le16(val);
7876 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7877 }
7878
7879 /* buf timer set to 1/4 of interrupt timer */
7880 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
7881 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
7882
7883 if (cmpl_params &
7884 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
7885 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7886 val = clamp_t(u16, tmr, 1,
7887 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
7888 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
7889 req->enables |=
7890 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
7891 }
7892
7893 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
7894 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
7895 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
7896 req->flags = cpu_to_le16(flags);
7897 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
7898 }
7899
7900 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7901 struct bnxt_coal *hw_coal)
7902 {
7903 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
7904 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7905 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7906 u32 nq_params = coal_cap->nq_params;
7907 u16 tmr;
7908 int rc;
7909
7910 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
7911 return 0;
7912
7913 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7914 if (rc)
7915 return rc;
7916
7917 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7918 req->flags =
7919 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
7920
7921 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7922 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
7923 req->int_lat_tmr_min = cpu_to_le16(tmr);
7924 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7925 return hwrm_req_send(bp, req);
7926 }
7927
7928 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7929 {
7930 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
7931 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7932 struct bnxt_coal coal;
7933 int rc;
7934
7935 /* Tick values in microseconds.
7936 * 1 coal_buf x bufs_per_record = 1 completion record.
7937 */
7938 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7939
7940 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7941 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7942
7943 if (!bnapi->rx_ring)
7944 return -ENODEV;
7945
7946 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7947 if (rc)
7948 return rc;
7949
7950 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7951
7952 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7953
7954 return hwrm_req_send(bp, req_rx);
7955 }
7956
7957 static int
7958 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7959 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7960 {
7961 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7962
7963 req->ring_id = cpu_to_le16(ring_id);
7964 return hwrm_req_send(bp, req);
7965 }
7966
7967 static int
7968 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7969 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7970 {
7971 struct bnxt_tx_ring_info *txr;
7972 int i, rc;
7973
7974 bnxt_for_each_napi_tx(i, bnapi, txr) {
7975 u16 ring_id;
7976
7977 ring_id = bnxt_cp_ring_for_tx(bp, txr);
7978 req->ring_id = cpu_to_le16(ring_id);
7979 rc = hwrm_req_send(bp, req);
7980 if (rc)
7981 return rc;
7982 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7983 return 0;
7984 }
7985 return 0;
7986 }
7987
7988 int bnxt_hwrm_set_coal(struct bnxt *bp)
7989 {
7990 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
7991 int i, rc;
7992
7993 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7994 if (rc)
7995 return rc;
7996
7997 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7998 if (rc) {
7999 hwrm_req_drop(bp, req_rx);
8000 return rc;
8001 }
8002
8003 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8004 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8005
8006 hwrm_req_hold(bp, req_rx);
8007 hwrm_req_hold(bp, req_tx);
8008 for (i = 0; i < bp->cp_nr_rings; i++) {
8009 struct bnxt_napi *bnapi = bp->bnapi[i];
8010 struct bnxt_coal *hw_coal;
8011
8012 if (!bnapi->rx_ring)
8013 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8014 else
8015 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8016 if (rc)
8017 break;
8018
8019 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8020 continue;
8021
8022 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8023 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8024 if (rc)
8025 break;
8026 }
8027 if (bnapi->rx_ring)
8028 hw_coal = &bp->rx_coal;
8029 else
8030 hw_coal = &bp->tx_coal;
8031 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8032 }
8033 hwrm_req_drop(bp, req_rx);
8034 hwrm_req_drop(bp, req_tx);
8035 return rc;
8036 }
8037
8038 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8039 {
8040 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8041 struct hwrm_stat_ctx_free_input *req;
8042 int i;
8043
8044 if (!bp->bnapi)
8045 return;
8046
8047 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8048 return;
8049
8050 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8051 return;
8052 if (BNXT_FW_MAJ(bp) <= 20) {
8053 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8054 hwrm_req_drop(bp, req);
8055 return;
8056 }
8057 hwrm_req_hold(bp, req0);
8058 }
8059 hwrm_req_hold(bp, req);
8060 for (i = 0; i < bp->cp_nr_rings; i++) {
8061 struct bnxt_napi *bnapi = bp->bnapi[i];
8062 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8063
8064 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8065 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8066 if (req0) {
8067 req0->stat_ctx_id = req->stat_ctx_id;
8068 hwrm_req_send(bp, req0);
8069 }
8070 hwrm_req_send(bp, req);
8071
8072 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8073 }
8074 }
8075 hwrm_req_drop(bp, req);
8076 if (req0)
8077 hwrm_req_drop(bp, req0);
8078 }
8079
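/* Allocate one firmware statistics context per completion ring,
 * passing each ring's stats DMA address and the configured update
 * period.
 */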
8080 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8081 {
8082 struct hwrm_stat_ctx_alloc_output *resp;
8083 struct hwrm_stat_ctx_alloc_input *req;
8084 int rc, i;
8085
8086 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8087 return 0;
8088
8089 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8090 if (rc)
8091 return rc;
8092
8093 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8094 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8095
8096 resp = hwrm_req_hold(bp, req);
8097 for (i = 0; i < bp->cp_nr_rings; i++) {
8098 struct bnxt_napi *bnapi = bp->bnapi[i];
8099 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8100
8101 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8102
8103 rc = hwrm_req_send(bp, req);
8104 if (rc)
8105 break;
8106
8107 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8108
8109 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8110 }
8111 hwrm_req_drop(bp, req);
8112 return rc;
8113 }
8114
8115 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8116 {
8117 struct hwrm_func_qcfg_output *resp;
8118 struct hwrm_func_qcfg_input *req;
8119 u16 flags;
8120 int rc;
8121
8122 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8123 if (rc)
8124 return rc;
8125
8126 req->fid = cpu_to_le16(0xffff);
8127 resp = hwrm_req_hold(bp, req);
8128 rc = hwrm_req_send(bp, req);
8129 if (rc)
8130 goto func_qcfg_exit;
8131
8132 #ifdef CONFIG_BNXT_SRIOV
8133 if (BNXT_VF(bp)) {
8134 struct bnxt_vf_info *vf = &bp->vf;
8135
8136 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8137 } else {
8138 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8139 }
8140 #endif
8141 flags = le16_to_cpu(resp->flags);
8142 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8143 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8144 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8145 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8146 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8147 }
8148 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8149 bp->flags |= BNXT_FLAG_MULTI_HOST;
8150
8151 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8152 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8153
8154 switch (resp->port_partition_type) {
8155 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8156 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8157 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8158 bp->port_partition_type = resp->port_partition_type;
8159 break;
8160 }
8161 if (bp->hwrm_spec_code < 0x10707 ||
8162 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8163 bp->br_mode = BRIDGE_MODE_VEB;
8164 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8165 bp->br_mode = BRIDGE_MODE_VEPA;
8166 else
8167 bp->br_mode = BRIDGE_MODE_UNDEF;
8168
8169 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8170 if (!bp->max_mtu)
8171 bp->max_mtu = BNXT_MAX_MTU;
8172
8173 if (bp->db_size)
8174 goto func_qcfg_exit;
8175
8176 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8177 if (BNXT_CHIP_P5(bp)) {
8178 if (BNXT_PF(bp))
8179 bp->db_offset = DB_PF_OFFSET_P5;
8180 else
8181 bp->db_offset = DB_VF_OFFSET_P5;
8182 }
8183 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8184 1024);
8185 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8186 bp->db_size <= bp->db_offset)
8187 bp->db_size = pci_resource_len(bp->pdev, 2);
8188
8189 func_qcfg_exit:
8190 hwrm_req_drop(bp, req);
8191 return rc;
8192 }
8193
8194 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8195 u8 init_val, u8 init_offset,
8196 bool init_mask_set)
8197 {
8198 ctxm->init_value = init_val;
8199 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8200 if (init_mask_set)
8201 ctxm->init_offset = init_offset * 4;
8202 else
8203 ctxm->init_value = 0;
8204 }
8205
8206 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8207 {
8208 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8209 u16 type;
8210
8211 for (type = 0; type < ctx_max; type++) {
8212 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8213 int n = 1;
8214
8215 if (!ctxm->max_entries)
8216 continue;
8217
8218 if (ctxm->instance_bmap)
8219 n = hweight32(ctxm->instance_bmap);
8220 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8221 if (!ctxm->pg_info)
8222 return -ENOMEM;
8223 }
8224 return 0;
8225 }
8226
8227 #define BNXT_CTX_INIT_VALID(flags) \
8228 (!!((flags) & \
8229 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8230
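/* Query backing store capabilities one context type at a time via the
 * V2 interface, following the next_valid_type field returned by
 * firmware, and cache each type's geometry in bp->ctx.
 */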
8231 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8232 {
8233 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8234 struct hwrm_func_backing_store_qcaps_v2_input *req;
8235 struct bnxt_ctx_mem_info *ctx;
8236 u16 type;
8237 int rc;
8238
8239 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8240 if (rc)
8241 return rc;
8242
8243 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8244 if (!ctx)
8245 return -ENOMEM;
8246 bp->ctx = ctx;
8247
8248 resp = hwrm_req_hold(bp, req);
8249
8250 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8251 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8252 u8 init_val, init_off, i;
8253 __le32 *p;
8254 u32 flags;
8255
8256 req->type = cpu_to_le16(type);
8257 rc = hwrm_req_send(bp, req);
8258 if (rc)
8259 goto ctx_done;
8260 flags = le32_to_cpu(resp->flags);
8261 type = le16_to_cpu(resp->next_valid_type);
8262 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
8263 continue;
8264
8265 ctxm->type = le16_to_cpu(resp->type);
8266 ctxm->entry_size = le16_to_cpu(resp->entry_size);
8267 ctxm->flags = flags;
8268 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8269 ctxm->entry_multiple = resp->entry_multiple;
8270 ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
8271 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8272 init_val = resp->ctx_init_value;
8273 init_off = resp->ctx_init_offset;
8274 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8275 BNXT_CTX_INIT_VALID(flags));
8276 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8277 BNXT_MAX_SPLIT_ENTRY);
8278 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8279 i++, p++)
8280 ctxm->split[i] = le32_to_cpu(*p);
8281 }
8282 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8283
8284 ctx_done:
8285 hwrm_req_drop(bp, req);
8286 return rc;
8287 }
8288
8289 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8290 {
8291 struct hwrm_func_backing_store_qcaps_output *resp;
8292 struct hwrm_func_backing_store_qcaps_input *req;
8293 int rc;
8294
8295 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
8296 return 0;
8297
8298 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8299 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8300
8301 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8302 if (rc)
8303 return rc;
8304
8305 resp = hwrm_req_hold(bp, req);
8306 rc = hwrm_req_send_silent(bp, req);
8307 if (!rc) {
8308 struct bnxt_ctx_mem_type *ctxm;
8309 struct bnxt_ctx_mem_info *ctx;
8310 u8 init_val, init_idx = 0;
8311 u16 init_mask;
8312
8313 ctx = bp->ctx;
8314 if (!ctx) {
8315 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8316 if (!ctx) {
8317 rc = -ENOMEM;
8318 goto ctx_err;
8319 }
8320 bp->ctx = ctx;
8321 }
8322 init_val = resp->ctx_kind_initializer;
8323 init_mask = le16_to_cpu(resp->ctx_init_mask);
8324
8325 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8326 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8327 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8328 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8329 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8330 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8331 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8332 (init_mask & (1 << init_idx++)) != 0);
8333
8334 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8335 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8336 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8337 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8338 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8339 (init_mask & (1 << init_idx++)) != 0);
8340
8341 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8342 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8343 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8344 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8345 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8346 (init_mask & (1 << init_idx++)) != 0);
8347
8348 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8349 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8350 ctxm->max_entries = ctxm->vnic_entries +
8351 le16_to_cpu(resp->vnic_max_ring_table_entries);
8352 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8353 bnxt_init_ctx_initializer(ctxm, init_val,
8354 resp->vnic_init_offset,
8355 (init_mask & (1 << init_idx++)) != 0);
8356
8357 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8358 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8359 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8360 bnxt_init_ctx_initializer(ctxm, init_val,
8361 resp->stat_init_offset,
8362 (init_mask & (1 << init_idx++)) != 0);
8363
8364 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8365 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8366 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8367 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8368 ctxm->entry_multiple = resp->tqm_entries_multiple;
8369 if (!ctxm->entry_multiple)
8370 ctxm->entry_multiple = 1;
8371
8372 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8373
8374 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8375 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8376 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8377 ctxm->mrav_num_entries_units =
8378 le16_to_cpu(resp->mrav_num_entries_units);
8379 bnxt_init_ctx_initializer(ctxm, init_val,
8380 resp->mrav_init_offset,
8381 (init_mask & (1 << init_idx++)) != 0);
8382
8383 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8384 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8385 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8386
8387 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8388 if (!ctx->tqm_fp_rings_count)
8389 ctx->tqm_fp_rings_count = bp->max_q;
8390 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8391 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8392
8393 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8394 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8395 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8396
8397 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8398 } else {
8399 rc = 0;
8400 }
8401 ctx_err:
8402 hwrm_req_drop(bp, req);
8403 return rc;
8404 }
8405
8406 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8407 __le64 *pg_dir)
8408 {
8409 if (!rmem->nr_pages)
8410 return;
8411
8412 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8413 if (rmem->depth >= 1) {
8414 if (rmem->depth == 2)
8415 *pg_attr |= 2;
8416 else
8417 *pg_attr |= 1;
8418 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8419 } else {
8420 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8421 }
8422 }
8423
8424 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8425 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8426 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8427 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8428 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8429 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8430
8431 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8432 {
8433 struct hwrm_func_backing_store_cfg_input *req;
8434 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8435 struct bnxt_ctx_pg_info *ctx_pg;
8436 struct bnxt_ctx_mem_type *ctxm;
8437 void **__req = (void **)&req;
8438 u32 req_len = sizeof(*req);
8439 __le32 *num_entries;
8440 __le64 *pg_dir;
8441 u32 flags = 0;
8442 u8 *pg_attr;
8443 u32 ena;
8444 int rc;
8445 int i;
8446
8447 if (!ctx)
8448 return 0;
8449
8450 if (req_len > bp->hwrm_max_ext_req_len)
8451 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8452 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8453 if (rc)
8454 return rc;
8455
8456 req->enables = cpu_to_le32(enables);
8457 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8458 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8459 ctx_pg = ctxm->pg_info;
8460 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8461 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8462 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8463 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8464 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8465 &req->qpc_pg_size_qpc_lvl,
8466 &req->qpc_page_dir);
8467
8468 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8469 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8470 }
8471 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8472 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8473 ctx_pg = ctxm->pg_info;
8474 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8475 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8476 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8477 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8478 &req->srq_pg_size_srq_lvl,
8479 &req->srq_page_dir);
8480 }
8481 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8482 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8483 ctx_pg = ctxm->pg_info;
8484 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8485 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8486 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8487 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8488 &req->cq_pg_size_cq_lvl,
8489 &req->cq_page_dir);
8490 }
8491 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8492 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8493 ctx_pg = ctxm->pg_info;
8494 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8495 req->vnic_num_ring_table_entries =
8496 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8497 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8498 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8499 &req->vnic_pg_size_vnic_lvl,
8500 &req->vnic_page_dir);
8501 }
8502 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8503 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8504 ctx_pg = ctxm->pg_info;
8505 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8506 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8507 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8508 &req->stat_pg_size_stat_lvl,
8509 &req->stat_page_dir);
8510 }
8511 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8512 u32 units;
8513
8514 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8515 ctx_pg = ctxm->pg_info;
8516 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8517 units = ctxm->mrav_num_entries_units;
8518 if (units) {
8519 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8520 u32 entries;
8521
8522 num_mr = ctx_pg->entries - num_ah;
8523 entries = ((num_mr / units) << 16) | (num_ah / units);
8524 req->mrav_num_entries = cpu_to_le32(entries);
8525 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8526 }
8527 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8528 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8529 &req->mrav_pg_size_mrav_lvl,
8530 &req->mrav_page_dir);
8531 }
8532 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8533 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8534 ctx_pg = ctxm->pg_info;
8535 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8536 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8537 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8538 &req->tim_pg_size_tim_lvl,
8539 &req->tim_page_dir);
8540 }
8541 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8542 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8543 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8544 pg_dir = &req->tqm_sp_page_dir,
8545 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8546 ctx_pg = ctxm->pg_info;
8547 i < BNXT_MAX_TQM_RINGS;
8548 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8549 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8550 if (!(enables & ena))
8551 continue;
8552
8553 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8554 *num_entries = cpu_to_le32(ctx_pg->entries);
8555 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8556 }
8557 req->flags = cpu_to_le32(flags);
8558 return hwrm_req_send(bp, req);
8559 }
8560
8561 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8562 struct bnxt_ctx_pg_info *ctx_pg)
8563 {
8564 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8565
8566 rmem->page_size = BNXT_PAGE_SIZE;
8567 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8568 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8569 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8570 if (rmem->depth >= 1)
8571 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8572 return bnxt_alloc_ring(bp, rmem);
8573 }
8574
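/* Allocate mem_size bytes of context memory as paged storage. Small
 * regions use a single level of pages; regions larger than
 * MAX_CTX_PAGES pages (or callers requesting depth > 1) use a 2-level
 * page table.
 */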
8575 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8576 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8577 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8578 {
8579 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8580 int rc;
8581
8582 if (!mem_size)
8583 return -EINVAL;
8584
8585 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8586 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8587 ctx_pg->nr_pages = 0;
8588 return -EINVAL;
8589 }
8590 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8591 int nr_tbls, i;
8592
8593 rmem->depth = 2;
8594 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8595 GFP_KERNEL);
8596 if (!ctx_pg->ctx_pg_tbl)
8597 return -ENOMEM;
8598 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8599 rmem->nr_pages = nr_tbls;
8600 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8601 if (rc)
8602 return rc;
8603 for (i = 0; i < nr_tbls; i++) {
8604 struct bnxt_ctx_pg_info *pg_tbl;
8605
8606 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8607 if (!pg_tbl)
8608 return -ENOMEM;
8609 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8610 rmem = &pg_tbl->ring_mem;
8611 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8612 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8613 rmem->depth = 1;
8614 rmem->nr_pages = MAX_CTX_PAGES;
8615 rmem->ctx_mem = ctxm;
8616 if (i == (nr_tbls - 1)) {
8617 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8618
8619 if (rem)
8620 rmem->nr_pages = rem;
8621 }
8622 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8623 if (rc)
8624 break;
8625 }
8626 } else {
8627 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8628 if (rmem->nr_pages > 1 || depth)
8629 rmem->depth = 1;
8630 rmem->ctx_mem = ctxm;
8631 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8632 }
8633 return rc;
8634 }
8635
8636 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8637 struct bnxt_ctx_pg_info *ctx_pg)
8638 {
8639 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8640
8641 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8642 ctx_pg->ctx_pg_tbl) {
8643 int i, nr_tbls = rmem->nr_pages;
8644
8645 for (i = 0; i < nr_tbls; i++) {
8646 struct bnxt_ctx_pg_info *pg_tbl;
8647 struct bnxt_ring_mem_info *rmem2;
8648
8649 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8650 if (!pg_tbl)
8651 continue;
8652 rmem2 = &pg_tbl->ring_mem;
8653 bnxt_free_ring(bp, rmem2);
8654 ctx_pg->ctx_pg_arr[i] = NULL;
8655 kfree(pg_tbl);
8656 ctx_pg->ctx_pg_tbl[i] = NULL;
8657 }
8658 kfree(ctx_pg->ctx_pg_tbl);
8659 ctx_pg->ctx_pg_tbl = NULL;
8660 }
8661 bnxt_free_ring(bp, rmem);
8662 ctx_pg->nr_pages = 0;
8663 }
8664
8665 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8666 struct bnxt_ctx_mem_type *ctxm, u32 entries,
8667 u8 pg_lvl)
8668 {
8669 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8670 int i, rc = 0, n = 1;
8671 u32 mem_size;
8672
8673 if (!ctxm->entry_size || !ctx_pg)
8674 return -EINVAL;
8675 if (ctxm->instance_bmap)
8676 n = hweight32(ctxm->instance_bmap);
8677 if (ctxm->entry_multiple)
8678 entries = roundup(entries, ctxm->entry_multiple);
8679 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8680 mem_size = entries * ctxm->entry_size;
8681 for (i = 0; i < n && !rc; i++) {
8682 ctx_pg[i].entries = entries;
8683 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8684 ctxm->init_value ? ctxm : NULL);
8685 }
8686 return rc;
8687 }
8688
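/* Configure one backing store context type in firmware, issuing a
 * separate HWRM_FUNC_BACKING_STORE_CFG_V2 request for each instance in
 * the instance bitmap. The last request of the last type carries the
 * BS_CFG_ALL_DONE flag.
 */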
8689 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8690 struct bnxt_ctx_mem_type *ctxm,
8691 bool last)
8692 {
8693 struct hwrm_func_backing_store_cfg_v2_input *req;
8694 u32 instance_bmap = ctxm->instance_bmap;
8695 int i, j, rc = 0, n = 1;
8696 __le32 *p;
8697
8698 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8699 return 0;
8700
8701 if (instance_bmap)
8702 n = hweight32(ctxm->instance_bmap);
8703 else
8704 instance_bmap = 1;
8705
8706 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8707 if (rc)
8708 return rc;
8709 hwrm_req_hold(bp, req);
8710 req->type = cpu_to_le16(ctxm->type);
8711 req->entry_size = cpu_to_le16(ctxm->entry_size);
8712 req->subtype_valid_cnt = ctxm->split_entry_cnt;
8713 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8714 p[i] = cpu_to_le32(ctxm->split[i]);
8715 for (i = 0, j = 0; j < n && !rc; i++) {
8716 struct bnxt_ctx_pg_info *ctx_pg;
8717
8718 if (!(instance_bmap & (1 << i)))
8719 continue;
8720 req->instance = cpu_to_le16(i);
8721 ctx_pg = &ctxm->pg_info[j++];
8722 if (!ctx_pg->entries)
8723 continue;
8724 req->num_entries = cpu_to_le32(ctx_pg->entries);
8725 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8726 &req->page_size_pbl_level,
8727 &req->page_dir);
8728 if (last && j == n)
8729 req->flags =
8730 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
8731 rc = hwrm_req_send(bp, req);
8732 }
8733 hwrm_req_drop(bp, req);
8734 return rc;
8735 }
8736
8737 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8738 {
8739 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8740 struct bnxt_ctx_mem_type *ctxm;
8741 u16 last_type;
8742 int rc = 0;
8743 u16 type;
8744
8745 if (!ena)
8746 return 0;
8747 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
8748 last_type = BNXT_CTX_MAX - 1;
8749 else
8750 last_type = BNXT_CTX_L2_MAX - 1;
8751 ctx->ctx_arr[last_type].last = 1;
8752
8753 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
8754 ctxm = &ctx->ctx_arr[type];
8755
8756 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8757 if (rc)
8758 return rc;
8759 }
8760 return 0;
8761 }
8762
8763 void bnxt_free_ctx_mem(struct bnxt *bp)
8764 {
8765 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8766 u16 type;
8767
8768 if (!ctx)
8769 return;
8770
8771 for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8772 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8773 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8774 int i, n = 1;
8775
8776 if (!ctx_pg)
8777 continue;
8778 if (ctxm->instance_bmap)
8779 n = hweight32(ctxm->instance_bmap);
8780 for (i = 0; i < n; i++)
8781 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8782
8783 kfree(ctx_pg);
8784 ctxm->pg_info = NULL;
8785 }
8786
8787 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
8788 kfree(ctx);
8789 bp->ctx = NULL;
8790 }
8791
8792 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
8793 {
8794 struct bnxt_ctx_mem_type *ctxm;
8795 struct bnxt_ctx_mem_info *ctx;
8796 u32 l2_qps, qp1_qps, max_qps;
8797 u32 ena, entries_sp, entries;
8798 u32 srqs, max_srqs, min;
8799 u32 num_mr, num_ah;
8800 u32 extra_srqs = 0;
8801 u32 extra_qps = 0;
8802 u32 fast_qpmd_qps;
8803 u8 pg_lvl = 1;
8804 int i, rc;
8805
8806 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8807 if (rc) {
8808 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8809 rc);
8810 return rc;
8811 }
8812 ctx = bp->ctx;
8813 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
8814 return 0;
8815
8816 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8817 l2_qps = ctxm->qp_l2_entries;
8818 qp1_qps = ctxm->qp_qp1_entries;
8819 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
8820 max_qps = ctxm->max_entries;
8821 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8822 srqs = ctxm->srq_l2_entries;
8823 max_srqs = ctxm->max_entries;
8824 ena = 0;
8825 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8826 pg_lvl = 2;
8827 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8828 /* allocate extra qps if fw supports RoCE fast qp destroy feature */
8829 extra_qps += fast_qpmd_qps;
8830 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8831 if (fast_qpmd_qps)
8832 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
8833 }
8834
8835 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8836 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8837 pg_lvl);
8838 if (rc)
8839 return rc;
8840
8841 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8842 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8843 if (rc)
8844 return rc;
8845
8846 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8847 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8848 extra_qps * 2, pg_lvl);
8849 if (rc)
8850 return rc;
8851
8852 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8853 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8854 if (rc)
8855 return rc;
8856
8857 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8858 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8859 if (rc)
8860 return rc;
8861
8862 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8863 goto skip_rdma;
8864
8865 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8866 /* 128K extra is needed to accommodate static AH context
8867 * allocation by f/w.
8868 */
8869 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
8870 num_ah = min_t(u32, num_mr, 1024 * 128);
8871 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
8872 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
8873 ctxm->mrav_av_entries = num_ah;
8874
8875 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8876 if (rc)
8877 return rc;
8878 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8879
8880 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8881 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8882 if (rc)
8883 return rc;
8884 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
8885
8886 skip_rdma:
8887 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8888 min = ctxm->min_entries;
8889 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
8890 2 * (extra_qps + qp1_qps) + min;
8891 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8892 if (rc)
8893 return rc;
8894
8895 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8896 entries = l2_qps + 2 * (extra_qps + qp1_qps);
8897 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8898 if (rc)
8899 return rc;
8900 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
8901 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
8902 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
8903
8904 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8905 rc = bnxt_backing_store_cfg_v2(bp, ena);
8906 else
8907 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8908 if (rc) {
8909 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8910 rc);
8911 return rc;
8912 }
8913 ctx->flags |= BNXT_CTX_FLAG_INITED;
8914 return 0;
8915 }
8916
8917 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
8918 {
8919 struct hwrm_dbg_crashdump_medium_cfg_input *req;
8920 u16 page_attr;
8921 int rc;
8922
8923 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
8924 return 0;
8925
8926 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
8927 if (rc)
8928 return rc;
8929
8930 if (BNXT_PAGE_SIZE == 0x2000)
8931 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
8932 else if (BNXT_PAGE_SIZE == 0x10000)
8933 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
8934 else
8935 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
8936 req->pg_size_lvl = cpu_to_le16(page_attr |
8937 bp->fw_crash_mem->ring_mem.depth);
8938 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
8939 req->size = cpu_to_le32(bp->fw_crash_len);
8940 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
8941 return hwrm_req_send(bp, req);
8942 }
8943
8944 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
8945 {
8946 if (bp->fw_crash_mem) {
8947 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
8948 kfree(bp->fw_crash_mem);
8949 bp->fw_crash_mem = NULL;
8950 }
8951 }
8952
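/* Size and allocate the host memory the firmware will use for a crash
 * dump.  The required length is queried from the firmware and rounded
 * up to 4 bytes; if a previous allocation is already large enough its
 * pages are reused, otherwise the page tables are rebuilt.
 */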
8953 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
8954 {
8955 u32 mem_size = 0;
8956 int rc;
8957
8958 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
8959 return 0;
8960
8961 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
8962 if (rc)
8963 return rc;
8964
8965 mem_size = round_up(mem_size, 4);
8966
8967 /* keep and use the existing pages */
8968 if (bp->fw_crash_mem &&
8969 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
8970 goto alloc_done;
8971
8972 if (bp->fw_crash_mem)
8973 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
8974 else
8975 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
8976 GFP_KERNEL);
8977 if (!bp->fw_crash_mem)
8978 return -ENOMEM;
8979
8980 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
8981 if (rc) {
8982 bnxt_free_crash_dump_mem(bp);
8983 return rc;
8984 }
8985
8986 alloc_done:
8987 bp->fw_crash_len = mem_size;
8988 return 0;
8989 }
8990
8991 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8992 {
8993 struct hwrm_func_resource_qcaps_output *resp;
8994 struct hwrm_func_resource_qcaps_input *req;
8995 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8996 int rc;
8997
8998 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8999 if (rc)
9000 return rc;
9001
9002 req->fid = cpu_to_le16(0xffff);
9003 resp = hwrm_req_hold(bp, req);
9004 rc = hwrm_req_send_silent(bp, req);
9005 if (rc)
9006 goto hwrm_func_resc_qcaps_exit;
9007
9008 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9009 if (!all)
9010 goto hwrm_func_resc_qcaps_exit;
9011
9012 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9013 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9014 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9015 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9016 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9017 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9018 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9019 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9020 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9021 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9022 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9023 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9024 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9025 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9026 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9027 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9028
9029 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9030 u16 max_msix = le16_to_cpu(resp->max_msix);
9031
9032 hw_resc->max_nqs = max_msix;
9033 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9034 }
9035
9036 if (BNXT_PF(bp)) {
9037 struct bnxt_pf_info *pf = &bp->pf;
9038
9039 pf->vf_resv_strategy =
9040 le16_to_cpu(resp->vf_reservation_strategy);
9041 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9042 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9043 }
9044 hwrm_func_resc_qcaps_exit:
9045 hwrm_req_drop(bp, req);
9046 return rc;
9047 }
9048
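/* Query the PTP configuration for the port.  The PHC reference clock
 * registers come from the response when the firmware reports direct or
 * 64-bit PHC access; on P5 chips a fixed TIMESYNC_TS0 register pair is
 * used as a fallback.  On any failure the ptp_cfg state is torn down.
 */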
9049 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9050 {
9051 struct hwrm_port_mac_ptp_qcfg_output *resp;
9052 struct hwrm_port_mac_ptp_qcfg_input *req;
9053 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9054 bool phc_cfg;
9055 u8 flags;
9056 int rc;
9057
9058 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9059 rc = -ENODEV;
9060 goto no_ptp;
9061 }
9062
9063 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9064 if (rc)
9065 goto no_ptp;
9066
9067 req->port_id = cpu_to_le16(bp->pf.port_id);
9068 resp = hwrm_req_hold(bp, req);
9069 rc = hwrm_req_send(bp, req);
9070 if (rc)
9071 goto exit;
9072
9073 flags = resp->flags;
9074 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9075 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9076 rc = -ENODEV;
9077 goto exit;
9078 }
9079 if (!ptp) {
9080 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9081 if (!ptp) {
9082 rc = -ENOMEM;
9083 goto exit;
9084 }
9085 ptp->bp = bp;
9086 bp->ptp_cfg = ptp;
9087 }
9088
9089 if (flags &
9090 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9091 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9092 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9093 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9094 } else if (BNXT_CHIP_P5(bp)) {
9095 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9096 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9097 } else {
9098 rc = -ENODEV;
9099 goto exit;
9100 }
9101 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9102 rc = bnxt_ptp_init(bp, phc_cfg);
9103 if (rc)
9104 netdev_warn(bp->dev, "PTP initialization failed.\n");
9105 exit:
9106 hwrm_req_drop(bp, req);
9107 if (!rc)
9108 return 0;
9109
9110 no_ptp:
9111 bnxt_ptp_clear(bp);
9112 kfree(ptp);
9113 bp->ptp_cfg = NULL;
9114 return rc;
9115 }
9116
9117 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9118 {
9119 struct hwrm_func_qcaps_output *resp;
9120 struct hwrm_func_qcaps_input *req;
9121 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9122 u32 flags, flags_ext, flags_ext2;
9123 int rc;
9124
9125 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9126 if (rc)
9127 return rc;
9128
9129 req->fid = cpu_to_le16(0xffff);
9130 resp = hwrm_req_hold(bp, req);
9131 rc = hwrm_req_send(bp, req);
9132 if (rc)
9133 goto hwrm_func_qcaps_exit;
9134
9135 flags = le32_to_cpu(resp->flags);
9136 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9137 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9138 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9139 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9140 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9141 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9142 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9143 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9144 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9145 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9146 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9147 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9148 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9149 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9150 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9151 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9152 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9153 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9154
9155 flags_ext = le32_to_cpu(resp->flags_ext);
9156 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9157 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9158 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9159 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9160 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9161 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9162 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9163 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9164 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9165 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9166 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9167 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9168 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9169 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9170 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9171 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9172
9173 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9174 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9175 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9176 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9177 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9178 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9179 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9180
9181 bp->tx_push_thresh = 0;
9182 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9183 BNXT_FW_MAJ(bp) > 217)
9184 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9185
9186 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9187 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9188 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9189 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9190 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9191 if (!hw_resc->max_hw_ring_grps)
9192 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9193 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9194 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9195 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9196
9197 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9198 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9199 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9200 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9201 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9202 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9203
9204 if (BNXT_PF(bp)) {
9205 struct bnxt_pf_info *pf = &bp->pf;
9206
9207 pf->fw_fid = le16_to_cpu(resp->fid);
9208 pf->port_id = le16_to_cpu(resp->port_id);
9209 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9210 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9211 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9212 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9213 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9214 bp->flags |= BNXT_FLAG_WOL_CAP;
9215 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9216 bp->fw_cap |= BNXT_FW_CAP_PTP;
9217 } else {
9218 bnxt_ptp_clear(bp);
9219 kfree(bp->ptp_cfg);
9220 bp->ptp_cfg = NULL;
9221 }
9222 } else {
9223 #ifdef CONFIG_BNXT_SRIOV
9224 struct bnxt_vf_info *vf = &bp->vf;
9225
9226 vf->fw_fid = le16_to_cpu(resp->fid);
9227 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9228 #endif
9229 }
9230 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9231
9232 hwrm_func_qcaps_exit:
9233 hwrm_req_drop(bp, req);
9234 return rc;
9235 }
9236
9237 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9238 {
9239 struct hwrm_dbg_qcaps_output *resp;
9240 struct hwrm_dbg_qcaps_input *req;
9241 int rc;
9242
9243 bp->fw_dbg_cap = 0;
9244 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9245 return;
9246
9247 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9248 if (rc)
9249 return;
9250
9251 req->fid = cpu_to_le16(0xffff);
9252 resp = hwrm_req_hold(bp, req);
9253 rc = hwrm_req_send(bp, req);
9254 if (rc)
9255 goto hwrm_dbg_qcaps_exit;
9256
9257 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9258
9259 hwrm_dbg_qcaps_exit:
9260 hwrm_req_drop(bp, req);
9261 }
9262
9263 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9264
9265 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9266 {
9267 int rc;
9268
9269 rc = __bnxt_hwrm_func_qcaps(bp);
9270 if (rc)
9271 return rc;
9272
9273 bnxt_hwrm_dbg_qcaps(bp);
9274
9275 rc = bnxt_hwrm_queue_qportcfg(bp);
9276 if (rc) {
9277 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9278 return rc;
9279 }
9280 if (bp->hwrm_spec_code >= 0x10803) {
9281 rc = bnxt_alloc_ctx_mem(bp);
9282 if (rc)
9283 return rc;
9284 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9285 if (!rc)
9286 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9287 }
9288 return 0;
9289 }
9290
9291 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9292 {
9293 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9294 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9295 u32 flags;
9296 int rc;
9297
9298 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9299 return 0;
9300
9301 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9302 if (rc)
9303 return rc;
9304
9305 resp = hwrm_req_hold(bp, req);
9306 rc = hwrm_req_send(bp, req);
9307 if (rc)
9308 goto hwrm_cfa_adv_qcaps_exit;
9309
9310 flags = le32_to_cpu(resp->flags);
9311 if (flags &
9312 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9313 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9314
9315 if (flags &
9316 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9317 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9318
9319 if (flags &
9320 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9321 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9322
9323 hwrm_cfa_adv_qcaps_exit:
9324 hwrm_req_drop(bp, req);
9325 return rc;
9326 }
9327
9328 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9329 {
9330 if (bp->fw_health)
9331 return 0;
9332
9333 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9334 if (!bp->fw_health)
9335 return -ENOMEM;
9336
9337 mutex_init(&bp->fw_health->lock);
9338 return 0;
9339 }
9340
9341 static int bnxt_alloc_fw_health(struct bnxt *bp)
9342 {
9343 int rc;
9344
9345 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9346 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9347 return 0;
9348
9349 rc = __bnxt_alloc_fw_health(bp);
9350 if (rc) {
9351 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9352 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9353 return rc;
9354 }
9355
9356 return 0;
9357 }
9358
9359 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9360 {
9361 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9362 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9363 BNXT_FW_HEALTH_WIN_MAP_OFF);
9364 }
9365
9366 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9367 {
9368 struct bnxt_fw_health *fw_health = bp->fw_health;
9369 u32 reg_type;
9370
9371 if (!fw_health)
9372 return;
9373
9374 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9375 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9376 fw_health->status_reliable = false;
9377
9378 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9379 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9380 fw_health->resets_reliable = false;
9381 }
9382
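/* Locate the firmware health status register before HWRM is usable.
 * The hcomm_status structure is mapped and its signature checked; if it
 * is not present, P5+ chips fall back to a fixed GRC status location
 * and older chips are left with status_reliable cleared.
 */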
9383 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9384 {
9385 void __iomem *hs;
9386 u32 status_loc;
9387 u32 reg_type;
9388 u32 sig;
9389
9390 if (bp->fw_health)
9391 bp->fw_health->status_reliable = false;
9392
9393 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9394 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9395
9396 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9397 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9398 if (!bp->chip_num) {
9399 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9400 bp->chip_num = readl(bp->bar0 +
9401 BNXT_FW_HEALTH_WIN_BASE +
9402 BNXT_GRC_REG_CHIP_NUM);
9403 }
9404 if (!BNXT_CHIP_P5_PLUS(bp))
9405 return;
9406
9407 status_loc = BNXT_GRC_REG_STATUS_P5 |
9408 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9409 } else {
9410 status_loc = readl(hs + offsetof(struct hcomm_status,
9411 fw_status_loc));
9412 }
9413
9414 if (__bnxt_alloc_fw_health(bp)) {
9415 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9416 return;
9417 }
9418
9419 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9420 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9421 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9422 __bnxt_map_fw_health_reg(bp, status_loc);
9423 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9424 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9425 }
9426
9427 bp->fw_health->status_reliable = true;
9428 }
9429
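/* Pre-map the GRC-based health monitoring registers through a single
 * register window.  All GRC registers must share the same window base;
 * -ERANGE is returned if they span more than one window.
 */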
9430 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9431 {
9432 struct bnxt_fw_health *fw_health = bp->fw_health;
9433 u32 reg_base = 0xffffffff;
9434 int i;
9435
9436 bp->fw_health->status_reliable = false;
9437 bp->fw_health->resets_reliable = false;
9438 /* Only pre-map the monitoring GRC registers using window 3 */
9439 for (i = 0; i < 4; i++) {
9440 u32 reg = fw_health->regs[i];
9441
9442 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9443 continue;
9444 if (reg_base == 0xffffffff)
9445 reg_base = reg & BNXT_GRC_BASE_MASK;
9446 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9447 return -ERANGE;
9448 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9449 }
9450 bp->fw_health->status_reliable = true;
9451 bp->fw_health->resets_reliable = true;
9452 if (reg_base == 0xffffffff)
9453 return 0;
9454
9455 __bnxt_map_fw_health_reg(bp, reg_base);
9456 return 0;
9457 }
9458
9459 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9460 {
9461 if (!bp->fw_health)
9462 return;
9463
9464 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9465 bp->fw_health->status_reliable = true;
9466 bp->fw_health->resets_reliable = true;
9467 } else {
9468 bnxt_try_map_fw_health_reg(bp);
9469 }
9470 }
9471
9472 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9473 {
9474 struct bnxt_fw_health *fw_health = bp->fw_health;
9475 struct hwrm_error_recovery_qcfg_output *resp;
9476 struct hwrm_error_recovery_qcfg_input *req;
9477 int rc, i;
9478
9479 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9480 return 0;
9481
9482 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9483 if (rc)
9484 return rc;
9485
9486 resp = hwrm_req_hold(bp, req);
9487 rc = hwrm_req_send(bp, req);
9488 if (rc)
9489 goto err_recovery_out;
9490 fw_health->flags = le32_to_cpu(resp->flags);
9491 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9492 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9493 rc = -EINVAL;
9494 goto err_recovery_out;
9495 }
9496 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9497 fw_health->master_func_wait_dsecs =
9498 le32_to_cpu(resp->master_func_wait_period);
9499 fw_health->normal_func_wait_dsecs =
9500 le32_to_cpu(resp->normal_func_wait_period);
9501 fw_health->post_reset_wait_dsecs =
9502 le32_to_cpu(resp->master_func_wait_period_after_reset);
9503 fw_health->post_reset_max_wait_dsecs =
9504 le32_to_cpu(resp->max_bailout_time_after_reset);
9505 fw_health->regs[BNXT_FW_HEALTH_REG] =
9506 le32_to_cpu(resp->fw_health_status_reg);
9507 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9508 le32_to_cpu(resp->fw_heartbeat_reg);
9509 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9510 le32_to_cpu(resp->fw_reset_cnt_reg);
9511 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9512 le32_to_cpu(resp->reset_inprogress_reg);
9513 fw_health->fw_reset_inprog_reg_mask =
9514 le32_to_cpu(resp->reset_inprogress_reg_mask);
9515 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9516 if (fw_health->fw_reset_seq_cnt >= 16) {
9517 rc = -EINVAL;
9518 goto err_recovery_out;
9519 }
9520 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9521 fw_health->fw_reset_seq_regs[i] =
9522 le32_to_cpu(resp->reset_reg[i]);
9523 fw_health->fw_reset_seq_vals[i] =
9524 le32_to_cpu(resp->reset_reg_val[i]);
9525 fw_health->fw_reset_seq_delay_msec[i] =
9526 resp->delay_after_reset[i];
9527 }
9528 err_recovery_out:
9529 hwrm_req_drop(bp, req);
9530 if (!rc)
9531 rc = bnxt_map_fw_health_regs(bp);
9532 if (rc)
9533 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9534 return rc;
9535 }
9536
9537 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9538 {
9539 struct hwrm_func_reset_input *req;
9540 int rc;
9541
9542 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9543 if (rc)
9544 return rc;
9545
9546 req->enables = 0;
9547 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9548 return hwrm_req_send(bp, req);
9549 }
9550
9551 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9552 {
9553 struct hwrm_nvm_get_dev_info_output nvm_info;
9554
9555 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9556 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9557 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9558 nvm_info.nvm_cfg_ver_upd);
9559 }
9560
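/* Query the hardware CoS queue configuration.  The response packs
 * (queue_id, queue_profile) byte pairs starting at queue_id0; CNP
 * queues used for RoCE congestion notification are skipped except on
 * a PF without RoCE capability.
 */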
9561 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9562 {
9563 struct hwrm_queue_qportcfg_output *resp;
9564 struct hwrm_queue_qportcfg_input *req;
9565 u8 i, j, *qptr;
9566 bool no_rdma;
9567 int rc = 0;
9568
9569 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9570 if (rc)
9571 return rc;
9572
9573 resp = hwrm_req_hold(bp, req);
9574 rc = hwrm_req_send(bp, req);
9575 if (rc)
9576 goto qportcfg_exit;
9577
9578 if (!resp->max_configurable_queues) {
9579 rc = -EINVAL;
9580 goto qportcfg_exit;
9581 }
9582 bp->max_tc = resp->max_configurable_queues;
9583 bp->max_lltc = resp->max_configurable_lossless_queues;
9584 if (bp->max_tc > BNXT_MAX_QUEUE)
9585 bp->max_tc = BNXT_MAX_QUEUE;
9586
9587 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9588 qptr = &resp->queue_id0;
9589 for (i = 0, j = 0; i < bp->max_tc; i++) {
9590 bp->q_info[j].queue_id = *qptr;
9591 bp->q_ids[i] = *qptr++;
9592 bp->q_info[j].queue_profile = *qptr++;
9593 bp->tc_to_qidx[j] = j;
9594 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9595 (no_rdma && BNXT_PF(bp)))
9596 j++;
9597 }
9598 bp->max_q = bp->max_tc;
9599 bp->max_tc = max_t(u8, j, 1);
9600
9601 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9602 bp->max_tc = 1;
9603
9604 if (bp->max_lltc > bp->max_tc)
9605 bp->max_lltc = bp->max_tc;
9606
9607 qportcfg_exit:
9608 hwrm_req_drop(bp, req);
9609 return rc;
9610 }
9611
9612 static int bnxt_hwrm_poll(struct bnxt *bp)
9613 {
9614 struct hwrm_ver_get_input *req;
9615 int rc;
9616
9617 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9618 if (rc)
9619 return rc;
9620
9621 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9622 req->hwrm_intf_min = HWRM_VERSION_MINOR;
9623 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9624
9625 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9626 rc = hwrm_req_send(bp, req);
9627 return rc;
9628 }
9629
9630 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9631 {
9632 struct hwrm_ver_get_output *resp;
9633 struct hwrm_ver_get_input *req;
9634 u16 fw_maj, fw_min, fw_bld, fw_rsv;
9635 u32 dev_caps_cfg, hwrm_ver;
9636 int rc, len;
9637
9638 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9639 if (rc)
9640 return rc;
9641
9642 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9643 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9644 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9645 req->hwrm_intf_min = HWRM_VERSION_MINOR;
9646 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9647
9648 resp = hwrm_req_hold(bp, req);
9649 rc = hwrm_req_send(bp, req);
9650 if (rc)
9651 goto hwrm_ver_get_exit;
9652
9653 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9654
9655 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9656 resp->hwrm_intf_min_8b << 8 |
9657 resp->hwrm_intf_upd_8b;
9658 if (resp->hwrm_intf_maj_8b < 1) {
9659 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9660 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9661 resp->hwrm_intf_upd_8b);
9662 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9663 }
9664
9665 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
9666 HWRM_VERSION_UPDATE;
9667
9668 if (bp->hwrm_spec_code > hwrm_ver)
9669 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9670 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
9671 HWRM_VERSION_UPDATE);
9672 else
9673 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9674 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9675 resp->hwrm_intf_upd_8b);
9676
9677 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
9678 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9679 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
9680 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
9681 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
9682 len = FW_VER_STR_LEN;
9683 } else {
9684 fw_maj = resp->hwrm_fw_maj_8b;
9685 fw_min = resp->hwrm_fw_min_8b;
9686 fw_bld = resp->hwrm_fw_bld_8b;
9687 fw_rsv = resp->hwrm_fw_rsvd_8b;
9688 len = BC_HWRM_STR_LEN;
9689 }
9690 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9691 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9692 fw_rsv);
9693
9694 if (strlen(resp->active_pkg_name)) {
9695 int fw_ver_len = strlen(bp->fw_ver_str);
9696
9697 snprintf(bp->fw_ver_str + fw_ver_len,
9698 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9699 resp->active_pkg_name);
9700 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9701 }
9702
9703 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9704 if (!bp->hwrm_cmd_timeout)
9705 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9706 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9707 if (!bp->hwrm_cmd_max_timeout)
9708 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9709 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9710 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9711 bp->hwrm_cmd_max_timeout / 1000);
9712
9713 if (resp->hwrm_intf_maj_8b >= 1) {
9714 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9715 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9716 }
9717 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9718 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9719
9720 bp->chip_num = le16_to_cpu(resp->chip_num);
9721 bp->chip_rev = resp->chip_rev;
9722 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9723 !resp->chip_metal)
9724 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9725
9726 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
9727 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
9728 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
9729 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9730
9731 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
9732 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9733
9734 if (dev_caps_cfg &
9735 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
9736 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9737
9738 if (dev_caps_cfg &
9739 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
9740 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9741
9742 if (dev_caps_cfg &
9743 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
9744 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9745
9746 hwrm_ver_get_exit:
9747 hwrm_req_drop(bp, req);
9748 return rc;
9749 }
9750
9751 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9752 {
9753 struct hwrm_fw_set_time_input *req;
9754 struct tm tm;
9755 time64_t now = ktime_get_real_seconds();
9756 int rc;
9757
9758 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9759 bp->hwrm_spec_code < 0x10400)
9760 return -EOPNOTSUPP;
9761
9762 time64_to_tm(now, 0, &tm);
9763 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9764 if (rc)
9765 return rc;
9766
9767 req->year = cpu_to_le16(1900 + tm.tm_year);
9768 req->month = 1 + tm.tm_mon;
9769 req->day = tm.tm_mday;
9770 req->hour = tm.tm_hour;
9771 req->minute = tm.tm_min;
9772 req->second = tm.tm_sec;
9773 return hwrm_req_send(bp, req);
9774 }
9775
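/* Fold a hardware counter that is narrower than 64 bits into its
 * 64-bit software mirror.  @mask covers the hardware counter width; if
 * the new hardware value is below the previously seen low bits, the
 * counter has wrapped and one full period (mask + 1) is added.  E.g.
 * with a 48-bit mask, *sw = 0x0000ffffffffff00 and hw = 0x10 yields
 * *sw = 0x0001000000000010.
 */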
9776 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
9777 {
9778 u64 sw_tmp;
9779
9780 hw &= mask;
9781 sw_tmp = (*sw & ~mask) | hw;
9782 if (hw < (*sw & mask))
9783 sw_tmp += mask + 1;
9784 WRITE_ONCE(*sw, sw_tmp);
9785 }
9786
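/* Accumulate an array of hardware counters into their software
 * mirrors.  A mask of -1 means the hardware counter is a full 64 bits
 * and can be copied directly; anything narrower goes through
 * bnxt_add_one_ctr().  @ignore_zero skips zero samples, see
 * bnxt_accumulate_all_stats().
 */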
9787 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
9788 int count, bool ignore_zero)
9789 {
9790 int i;
9791
9792 for (i = 0; i < count; i++) {
9793 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
9794
9795 if (ignore_zero && !hw)
9796 continue;
9797
9798 if (masks[i] == -1ULL)
9799 sw_stats[i] = hw;
9800 else
9801 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9802 }
9803 }
9804
9805 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
9806 {
9807 if (!stats->hw_stats)
9808 return;
9809
9810 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9811 stats->hw_masks, stats->len / 8, false);
9812 }
9813
9814 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9815 {
9816 struct bnxt_stats_mem *ring0_stats;
9817 bool ignore_zero = false;
9818 int i;
9819
9820 /* Chip bug. Counter intermittently becomes 0. */
9821 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9822 ignore_zero = true;
9823
9824 for (i = 0; i < bp->cp_nr_rings; i++) {
9825 struct bnxt_napi *bnapi = bp->bnapi[i];
9826 struct bnxt_cp_ring_info *cpr;
9827 struct bnxt_stats_mem *stats;
9828
9829 cpr = &bnapi->cp_ring;
9830 stats = &cpr->stats;
9831 if (!i)
9832 ring0_stats = stats;
9833 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9834 ring0_stats->hw_masks,
9835 ring0_stats->len / 8, ignore_zero);
9836 }
9837 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9838 struct bnxt_stats_mem *stats = &bp->port_stats;
9839 __le64 *hw_stats = stats->hw_stats;
9840 u64 *sw_stats = stats->sw_stats;
9841 u64 *masks = stats->hw_masks;
9842 int cnt;
9843
9844 cnt = sizeof(struct rx_port_stats) / 8;
9845 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9846
9847 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9848 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9849 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9850 cnt = sizeof(struct tx_port_stats) / 8;
9851 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9852 }
9853 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9854 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9855 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9856 }
9857 }
9858
9859 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9860 {
9861 struct hwrm_port_qstats_input *req;
9862 struct bnxt_pf_info *pf = &bp->pf;
9863 int rc;
9864
9865 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9866 return 0;
9867
9868 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9869 return -EOPNOTSUPP;
9870
9871 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9872 if (rc)
9873 return rc;
9874
9875 req->flags = flags;
9876 req->port_id = cpu_to_le16(pf->port_id);
9877 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9878 BNXT_TX_PORT_STATS_BYTE_OFFSET);
9879 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9880 return hwrm_req_send(bp, req);
9881 }
9882
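/* Collect extended port statistics and, when the TX extended stats are
 * large enough to include the PFC duration counters, refresh the
 * priority to CoS queue mapping (pri2cos) used to attribute per-TC
 * statistics.
 */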
9883 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9884 {
9885 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
9886 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
9887 struct hwrm_port_qstats_ext_output *resp_qs;
9888 struct hwrm_port_qstats_ext_input *req_qs;
9889 struct bnxt_pf_info *pf = &bp->pf;
9890 u32 tx_stat_size;
9891 int rc;
9892
9893 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9894 return 0;
9895
9896 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9897 return -EOPNOTSUPP;
9898
9899 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9900 if (rc)
9901 return rc;
9902
9903 req_qs->flags = flags;
9904 req_qs->port_id = cpu_to_le16(pf->port_id);
9905 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
9906 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9907 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9908 sizeof(struct tx_port_stats_ext) : 0;
9909 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
9910 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9911 resp_qs = hwrm_req_hold(bp, req_qs);
9912 rc = hwrm_req_send(bp, req_qs);
9913 if (!rc) {
9914 bp->fw_rx_stats_ext_size =
9915 le16_to_cpu(resp_qs->rx_stat_size) / 8;
9916 if (BNXT_FW_MAJ(bp) < 220 &&
9917 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9918 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9919
9920 bp->fw_tx_stats_ext_size = tx_stat_size ?
9921 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
9922 } else {
9923 bp->fw_rx_stats_ext_size = 0;
9924 bp->fw_tx_stats_ext_size = 0;
9925 }
9926 hwrm_req_drop(bp, req_qs);
9927
9928 if (flags)
9929 return rc;
9930
9931 if (bp->fw_tx_stats_ext_size <=
9932 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
9933 bp->pri2cos_valid = 0;
9934 return rc;
9935 }
9936
9937 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9938 if (rc)
9939 return rc;
9940
9941 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
9942
9943 resp_qc = hwrm_req_hold(bp, req_qc);
9944 rc = hwrm_req_send(bp, req_qc);
9945 if (!rc) {
9946 u8 *pri2cos;
9947 int i, j;
9948
9949 pri2cos = &resp_qc->pri0_cos_queue_id;
9950 for (i = 0; i < 8; i++) {
9951 u8 queue_id = pri2cos[i];
9952 u8 queue_idx;
9953
9954 /* Per port queue IDs start from 0, 10, 20, etc */
9955 queue_idx = queue_id % 10;
9956 if (queue_idx > BNXT_MAX_QUEUE) {
9957 bp->pri2cos_valid = false;
9958 hwrm_req_drop(bp, req_qc);
9959 return rc;
9960 }
9961 for (j = 0; j < bp->max_q; j++) {
9962 if (bp->q_ids[j] == queue_id)
9963 bp->pri2cos_idx[i] = queue_idx;
9964 }
9965 }
9966 bp->pri2cos_valid = true;
9967 }
9968 hwrm_req_drop(bp, req_qc);
9969
9970 return rc;
9971 }
9972
9973 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9974 {
9975 bnxt_hwrm_tunnel_dst_port_free(bp,
9976 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9977 bnxt_hwrm_tunnel_dst_port_free(bp,
9978 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9979 }
9980
9981 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9982 {
9983 int rc, i;
9984 u32 tpa_flags = 0;
9985
9986 if (set_tpa)
9987 tpa_flags = bp->flags & BNXT_FLAG_TPA;
9988 else if (BNXT_NO_FW_ACCESS(bp))
9989 return 0;
9990 for (i = 0; i < bp->nr_vnics; i++) {
9991 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
9992 if (rc) {
9993 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
9994 i, rc);
9995 return rc;
9996 }
9997 }
9998 return 0;
9999 }
10000
10001 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10002 {
10003 int i;
10004
10005 for (i = 0; i < bp->nr_vnics; i++)
10006 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10007 }
10008
10009 static void bnxt_clear_vnic(struct bnxt *bp)
10010 {
10011 if (!bp->vnic_info)
10012 return;
10013
10014 bnxt_hwrm_clear_vnic_filter(bp);
10015 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10016 /* clear all RSS settings before freeing the vnic ctx */
10017 bnxt_hwrm_clear_vnic_rss(bp);
10018 bnxt_hwrm_vnic_ctx_free(bp);
10019 }
10020 /* undo the vnic TPA settings before freeing the vnic */
10021 if (bp->flags & BNXT_FLAG_TPA)
10022 bnxt_set_tpa(bp, false);
10023 bnxt_hwrm_vnic_free(bp);
10024 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10025 bnxt_hwrm_vnic_ctx_free(bp);
10026 }
10027
10028 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10029 bool irq_re_init)
10030 {
10031 bnxt_clear_vnic(bp);
10032 bnxt_hwrm_ring_free(bp, close_path);
10033 bnxt_hwrm_ring_grp_free(bp);
10034 if (irq_re_init) {
10035 bnxt_hwrm_stat_ctx_free(bp);
10036 bnxt_hwrm_free_tunnel_ports(bp);
10037 }
10038 }
10039
10040 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10041 {
10042 struct hwrm_func_cfg_input *req;
10043 u8 evb_mode;
10044 int rc;
10045
10046 if (br_mode == BRIDGE_MODE_VEB)
10047 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10048 else if (br_mode == BRIDGE_MODE_VEPA)
10049 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10050 else
10051 return -EINVAL;
10052
10053 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10054 if (rc)
10055 return rc;
10056
10057 req->fid = cpu_to_le16(0xffff);
10058 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10059 req->evb_mode = evb_mode;
10060 return hwrm_req_send(bp, req);
10061 }
10062
10063 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10064 {
10065 struct hwrm_func_cfg_input *req;
10066 int rc;
10067
10068 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10069 return 0;
10070
10071 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10072 if (rc)
10073 return rc;
10074
10075 req->fid = cpu_to_le16(0xffff);
10076 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10077 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10078 if (size == 128)
10079 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10080
10081 return hwrm_req_send(bp, req);
10082 }
10083
10084 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10085 {
10086 int rc;
10087
10088 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10089 goto skip_rss_ctx;
10090
10091 /* allocate context for vnic */
10092 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10093 if (rc) {
10094 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10095 vnic->vnic_id, rc);
10096 goto vnic_setup_err;
10097 }
10098 bp->rsscos_nr_ctxs++;
10099
10100 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10101 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10102 if (rc) {
10103 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10104 vnic->vnic_id, rc);
10105 goto vnic_setup_err;
10106 }
10107 bp->rsscos_nr_ctxs++;
10108 }
10109
10110 skip_rss_ctx:
10111 /* configure default vnic, ring grp */
10112 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10113 if (rc) {
10114 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10115 vnic->vnic_id, rc);
10116 goto vnic_setup_err;
10117 }
10118
10119 /* Enable RSS hashing on vnic */
10120 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10121 if (rc) {
10122 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10123 vnic->vnic_id, rc);
10124 goto vnic_setup_err;
10125 }
10126
10127 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10128 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10129 if (rc) {
10130 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10131 vnic->vnic_id, rc);
10132 }
10133 }
10134
10135 vnic_setup_err:
10136 return rc;
10137 }
10138
10139 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10140 u8 valid)
10141 {
10142 struct hwrm_vnic_update_input *req;
10143 int rc;
10144
10145 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10146 if (rc)
10147 return rc;
10148
10149 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10150
10151 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10152 req->mru = cpu_to_le16(vnic->mru);
10153
10154 req->enables = cpu_to_le32(valid);
10155
10156 return hwrm_req_send(bp, req);
10157 }
10158
10159 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10160 {
10161 int rc;
10162
10163 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10164 if (rc) {
10165 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10166 vnic->vnic_id, rc);
10167 return rc;
10168 }
10169 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10170 if (rc)
10171 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10172 vnic->vnic_id, rc);
10173 return rc;
10174 }
10175
10176 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10177 {
10178 int rc, i, nr_ctxs;
10179
10180 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10181 for (i = 0; i < nr_ctxs; i++) {
10182 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10183 if (rc) {
10184 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10185 vnic->vnic_id, i, rc);
10186 break;
10187 }
10188 bp->rsscos_nr_ctxs++;
10189 }
10190 if (i < nr_ctxs)
10191 return -ENOMEM;
10192
10193 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10194 if (rc)
10195 return rc;
10196
10197 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10198 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10199 if (rc) {
10200 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10201 vnic->vnic_id, rc);
10202 }
10203 }
10204 return rc;
10205 }
10206
10207 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10208 {
10209 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10210 return __bnxt_setup_vnic_p5(bp, vnic);
10211 else
10212 return __bnxt_setup_vnic(bp, vnic);
10213 }
10214
10215 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10216 struct bnxt_vnic_info *vnic,
10217 u16 start_rx_ring_idx, int rx_rings)
10218 {
10219 int rc;
10220
10221 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10222 if (rc) {
10223 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10224 vnic->vnic_id, rc);
10225 return rc;
10226 }
10227 return bnxt_setup_vnic(bp, vnic);
10228 }
10229
10230 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10231 {
10232 struct bnxt_vnic_info *vnic;
10233 int i, rc = 0;
10234
10235 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10236 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10237 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10238 }
10239
10240 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10241 return 0;
10242
10243 for (i = 0; i < bp->rx_nr_rings; i++) {
10244 u16 vnic_id = i + 1;
10245 u16 ring_id = i;
10246
10247 if (vnic_id >= bp->nr_vnics)
10248 break;
10249
10250 vnic = &bp->vnic_info[vnic_id];
10251 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10252 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10253 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10254 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10255 break;
10256 }
10257 return rc;
10258 }
10259
10260 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10261 bool all)
10262 {
10263 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10264 struct bnxt_filter_base *usr_fltr, *tmp;
10265 struct bnxt_ntuple_filter *ntp_fltr;
10266 int i;
10267
10268 if (netif_running(bp->dev)) {
10269 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10270 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10271 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10272 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10273 }
10274 }
10275 if (!all)
10276 return;
10277
10278 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10279 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10280 usr_fltr->fw_vnic_id == rss_ctx->index) {
10281 ntp_fltr = container_of(usr_fltr,
10282 struct bnxt_ntuple_filter,
10283 base);
10284 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10285 bnxt_del_ntp_filter(bp, ntp_fltr);
10286 bnxt_del_one_usr_fltr(bp, usr_fltr);
10287 }
10288 }
10289
10290 if (vnic->rss_table)
10291 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10292 vnic->rss_table,
10293 vnic->rss_table_dma_addr);
10294 bp->num_rss_ctx--;
10295 }
10296
10297 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10298 {
10299 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10300 struct ethtool_rxfh_context *ctx;
10301 unsigned long context;
10302
10303 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10304 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10305 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10306
10307 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10308 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10309 __bnxt_setup_vnic_p5(bp, vnic)) {
10310 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10311 rss_ctx->index);
10312 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10313 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10314 }
10315 }
10316 }
10317
10318 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10319 {
10320 struct ethtool_rxfh_context *ctx;
10321 unsigned long context;
10322
10323 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10324 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10325
10326 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10327 }
10328 }
10329
10330 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10331 static bool bnxt_promisc_ok(struct bnxt *bp)
10332 {
10333 #ifdef CONFIG_BNXT_SRIOV
10334 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10335 return false;
10336 #endif
10337 return true;
10338 }
10339
10340 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10341 {
10342 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10343 unsigned int rc = 0;
10344
10345 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10346 if (rc) {
10347 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10348 rc);
10349 return rc;
10350 }
10351
10352 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10353 if (rc) {
10354 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10355 rc);
10356 return rc;
10357 }
10358 return rc;
10359 }
10360
10361 static int bnxt_cfg_rx_mode(struct bnxt *);
10362 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10363
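/* Bring up the L2 side of the chip: allocate stat contexts, rings and
 * ring groups, create the default VNIC with RSS/HDS, optional RFS
 * VNICs and TPA, program the unicast filter and RX mask, and set
 * interrupt coalescing.  On error all HWRM resources are freed.
 */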
10364 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10365 {
10366 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10367 int rc = 0;
10368 unsigned int rx_nr_rings = bp->rx_nr_rings;
10369
10370 if (irq_re_init) {
10371 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10372 if (rc) {
10373 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10374 rc);
10375 goto err_out;
10376 }
10377 }
10378
10379 rc = bnxt_hwrm_ring_alloc(bp);
10380 if (rc) {
10381 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10382 goto err_out;
10383 }
10384
10385 rc = bnxt_hwrm_ring_grp_alloc(bp);
10386 if (rc) {
10387 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10388 goto err_out;
10389 }
10390
10391 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10392 rx_nr_rings--;
10393
10394 /* default vnic 0 */
10395 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10396 if (rc) {
10397 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10398 goto err_out;
10399 }
10400
10401 if (BNXT_VF(bp))
10402 bnxt_hwrm_func_qcfg(bp);
10403
10404 rc = bnxt_setup_vnic(bp, vnic);
10405 if (rc)
10406 goto err_out;
10407 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10408 bnxt_hwrm_update_rss_hash_cfg(bp);
10409
10410 if (bp->flags & BNXT_FLAG_RFS) {
10411 rc = bnxt_alloc_rfs_vnics(bp);
10412 if (rc)
10413 goto err_out;
10414 }
10415
10416 if (bp->flags & BNXT_FLAG_TPA) {
10417 rc = bnxt_set_tpa(bp, true);
10418 if (rc)
10419 goto err_out;
10420 }
10421
10422 if (BNXT_VF(bp))
10423 bnxt_update_vf_mac(bp);
10424
10425 /* Filter for default vnic 0 */
10426 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10427 if (rc) {
10428 if (BNXT_VF(bp) && rc == -ENODEV)
10429 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10430 else
10431 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10432 goto err_out;
10433 }
10434 vnic->uc_filter_count = 1;
10435
10436 vnic->rx_mask = 0;
10437 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10438 goto skip_rx_mask;
10439
10440 if (bp->dev->flags & IFF_BROADCAST)
10441 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10442
10443 if (bp->dev->flags & IFF_PROMISC)
10444 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10445
10446 if (bp->dev->flags & IFF_ALLMULTI) {
10447 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10448 vnic->mc_list_count = 0;
10449 } else if (bp->dev->flags & IFF_MULTICAST) {
10450 u32 mask = 0;
10451
10452 bnxt_mc_list_updated(bp, &mask);
10453 vnic->rx_mask |= mask;
10454 }
10455
10456 rc = bnxt_cfg_rx_mode(bp);
10457 if (rc)
10458 goto err_out;
10459
10460 skip_rx_mask:
10461 rc = bnxt_hwrm_set_coal(bp);
10462 if (rc)
10463 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10464 rc);
10465
10466 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10467 rc = bnxt_setup_nitroa0_vnic(bp);
10468 if (rc)
10469 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10470 rc);
10471 }
10472
10473 if (BNXT_VF(bp)) {
10474 bnxt_hwrm_func_qcfg(bp);
10475 netdev_update_features(bp->dev);
10476 }
10477
10478 return 0;
10479
10480 err_out:
10481 bnxt_hwrm_resource_free(bp, 0, true);
10482
10483 return rc;
10484 }
10485
10486 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10487 {
10488 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10489 return 0;
10490 }
10491
10492 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10493 {
10494 bnxt_init_cp_rings(bp);
10495 bnxt_init_rx_rings(bp);
10496 bnxt_init_tx_rings(bp);
10497 bnxt_init_ring_grps(bp, irq_re_init);
10498 bnxt_init_vnics(bp);
10499
10500 return bnxt_init_chip(bp, irq_re_init);
10501 }
10502
10503 static int bnxt_set_real_num_queues(struct bnxt *bp)
10504 {
10505 int rc;
10506 struct net_device *dev = bp->dev;
10507
10508 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10509 bp->tx_nr_rings_xdp);
10510 if (rc)
10511 return rc;
10512
10513 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10514 if (rc)
10515 return rc;
10516
10517 #ifdef CONFIG_RFS_ACCEL
10518 if (bp->flags & BNXT_FLAG_RFS)
10519 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10520 #endif
10521
10522 return rc;
10523 }
10524
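/* Fit *rx and *tx into @max rings.  In shared mode each value is
 * simply capped at @max (RX and TX share completion rings).  Otherwise
 * the larger count is decremented until rx + tx <= max, e.g. max = 6,
 * rx = 5, tx = 4 trims to rx = 3, tx = 3.
 */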
10525 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10526 bool shared)
10527 {
10528 int _rx = *rx, _tx = *tx;
10529
10530 if (shared) {
10531 *rx = min_t(int, _rx, max);
10532 *tx = min_t(int, _tx, max);
10533 } else {
10534 if (max < 2)
10535 return -ENOMEM;
10536
10537 while (_rx + _tx > max) {
10538 if (_rx > _tx && _rx > 1)
10539 _rx--;
10540 else if (_tx > 1)
10541 _tx--;
10542 }
10543 *rx = _rx;
10544 *tx = _tx;
10545 }
10546 return 0;
10547 }
10548
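/* Convert a TX ring count to the number of completion rings it needs.
 * Every @tx_sets non-XDP TX rings (one per TC) share a completion
 * ring, while XDP TX rings map one-to-one: e.g. tx = 8, tx_sets = 2,
 * tx_xdp = 0 needs 4 completion rings.
 */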
10549 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10550 {
10551 return (tx - tx_xdp) / tx_sets + tx_xdp;
10552 }
10553
10554 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10555 {
10556 int tcs = bp->num_tc;
10557
10558 if (!tcs)
10559 tcs = 1;
10560 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10561 }
10562
10563 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10564 {
10565 int tcs = bp->num_tc;
10566
10567 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10568 bp->tx_nr_rings_xdp;
10569 }
10570
10571 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10572 bool sh)
10573 {
10574 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10575
10576 if (tx_cp != *tx) {
10577 int tx_saved = tx_cp, rc;
10578
10579 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10580 if (rc)
10581 return rc;
10582 if (tx_cp != tx_saved)
10583 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
10584 return 0;
10585 }
10586 return __bnxt_trim_rings(bp, rx, tx, max, sh);
10587 }
10588
bnxt_setup_msix(struct bnxt * bp)10589 static void bnxt_setup_msix(struct bnxt *bp)
10590 {
10591 const int len = sizeof(bp->irq_tbl[0].name);
10592 struct net_device *dev = bp->dev;
10593 int tcs, i;
10594
10595 tcs = bp->num_tc;
10596 if (tcs) {
10597 int i, off, count;
10598
10599 for (i = 0; i < tcs; i++) {
10600 count = bp->tx_nr_rings_per_tc;
10601 off = BNXT_TC_TO_RING_BASE(bp, i);
10602 netdev_set_tc_queue(dev, i, count, off);
10603 }
10604 }
10605
10606 for (i = 0; i < bp->cp_nr_rings; i++) {
10607 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10608 char *attr;
10609
10610 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10611 attr = "TxRx";
10612 else if (i < bp->rx_nr_rings)
10613 attr = "rx";
10614 else
10615 attr = "tx";
10616
10617 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10618 attr, i);
10619 bp->irq_tbl[map_idx].handler = bnxt_msix;
10620 }
10621 }
10622
10623 static int bnxt_init_int_mode(struct bnxt *bp);
10624
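/* Grow or shrink the MSI-X allocation to @total vectors using dynamic
 * per-vector allocation, and return the resulting bp->total_irqs (which
 * may fall short of @total if pci_msix_alloc_irq_at() fails partway).
 */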
static int bnxt_change_msix(struct bnxt *bp, int total)
{
	struct msi_map map;
	int i;

	/* add MSIX to the end if needed */
	for (i = bp->total_irqs; i < total; i++) {
		map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
		if (map.index < 0)
			return bp->total_irqs;
		bp->irq_tbl[i].vector = map.virq;
		bp->total_irqs++;
	}

	/* trim MSIX from the end if needed */
	for (i = bp->total_irqs; i > total; i--) {
		map.index = i - 1;
		map.virq = bp->irq_tbl[i - 1].vector;
		pci_msix_free_irq(bp->pdev, map);
		bp->total_irqs--;
	}
	return bp->total_irqs;
}

static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (!bp->irq_tbl) {
		rc = bnxt_init_int_mode(bp);
		if (rc || !bp->irq_tbl)
			return rc ?: -ENODEV;
	}

	bnxt_setup_msix(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}

static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
	return bp->hw_resc.max_vnics;
}

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings;
}

static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp = bp->hw_resc.max_cp_rings;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		cp -= bnxt_get_ulp_msix_num(bp);

	return cp;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);

	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
	bp->hw_resc.max_irqs = max_irqs;
}

unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp;

	cp = bnxt_get_max_func_cp_rings_for_en(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
	else
		return cp - bp->cp_nr_rings;
}

unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}

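/* Clamp a request for @num extra MSI-X vectors (e.g. for the RoCE ULP)
 * to what remains after the vectors used by the L2 completion rings.
 */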
static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
	int max_irq = bnxt_get_max_func_irqs(bp);
	int total_req = bp->cp_nr_rings + num;

	if (max_irq < total_req) {
		num = max_irq - bp->cp_nr_rings;
		if (num <= 0)
			return 0;
	}
	return num;
}

static int bnxt_get_num_msix(struct bnxt *bp)
{
	if (!BNXT_NEW_RM(bp))
		return bnxt_get_max_func_irqs(bp);

	return bnxt_nq_rings_in_use(bp);
}

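/* Allocate MSI-X vectors, populate the IRQ table, and trim the RX/TX
 * ring counts to fit the vectors actually granted.  The table is sized
 * for the function maximum when dynamic MSI-X allocation is supported,
 * so vectors can be added later without reallocating it.
 */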
static int bnxt_init_int_mode(struct bnxt *bp)
{
	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;

	total_vecs = bnxt_get_num_msix(bp);
	max = bnxt_get_max_func_irqs(bp);
	if (total_vecs > max)
		total_vecs = max;

	if (!total_vecs)
		return 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		min = 2;

	total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
					   PCI_IRQ_MSIX);
	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (total_vecs < 0 || total_vecs < ulp_msix) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}

	tbl_size = total_vecs;
	if (pci_msix_can_alloc_dyn(bp->pdev))
		tbl_size = max;
	bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
	if (bp->irq_tbl) {
		for (i = 0; i < total_vecs; i++)
			bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);

		bp->total_irqs = total_vecs;
		/* Trim rings based upon num of vectors allocated */
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs - ulp_msix, min == 1);
		if (rc)
			goto msix_setup_exit;

		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
		bp->cp_nr_rings = (min == 1) ?
				  max_t(int, tx_cp, bp->rx_nr_rings) :
				  tx_cp + bp->rx_nr_rings;

	} else {
		rc = -ENOMEM;
		goto msix_setup_exit;
	}
	return 0;

msix_setup_exit:
	netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_free_irq_vectors(bp->pdev);
	return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
	pci_free_irq_vectors(bp->pdev);

	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
}

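/* Reserve rings (and MSI-X vectors) with the firmware to match the
 * current configuration.  If the vector count must change and dynamic
 * MSI-X allocation is unavailable, all vectors are torn down and
 * re-allocated, with ULP interrupts quiesced around the switch.
 */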
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
	bool irq_cleared = false;
	bool irq_change = false;
	int tcs = bp->num_tc;
	int irqs_required;
	int rc;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);

		if (ulp_msix > bp->ulp_num_msix_want)
			ulp_msix = bp->ulp_num_msix_want;
		irqs_required = ulp_msix + bp->cp_nr_rings;
	} else {
		irqs_required = bnxt_get_num_msix(bp);
	}

	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
		irq_change = true;
		if (!pci_msix_can_alloc_dyn(bp->pdev)) {
			bnxt_ulp_irq_stop(bp);
			bnxt_clear_int_mode(bp);
			irq_cleared = true;
		}
	}
	rc = __bnxt_reserve_rings(bp);
	if (irq_cleared) {
		if (!rc)
			rc = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, rc);
	} else if (irq_change && !rc) {
		if (bnxt_change_msix(bp, irqs_required) != irqs_required)
			rc = -ENOSPC;
	}
	if (rc) {
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
		return rc;
	}
	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
		netdev_err(bp->dev, "tx ring reservation failure\n");
		netdev_reset_tc(bp->dev);
		bp->num_tc = 0;
		if (bp->tx_nr_rings_xdp)
			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
		else
			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
		return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_irq(struct bnxt *bp)
{
	struct bnxt_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
	bp->dev->rx_cpu_rmap = NULL;
#endif
	if (!bp->irq_tbl || !bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		irq = &bp->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bp->bnapi[i]);
		}

		irq->requested = 0;
	}
}

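/* Request one IRQ per completion ring, register each RX ring's vector
 * with the aRFS CPU rmap, and set an affinity hint that spreads the
 * vectors across CPUs local to the device's NUMA node.
 */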
static int bnxt_request_irq(struct bnxt *bp)
{
	int i, j, rc = 0;
	unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif

	rc = bnxt_setup_int_mode(bp);
	if (rc) {
		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
			   rc);
		return rc;
	}
#ifdef CONFIG_RFS_ACCEL
	rmap = bp->dev->rx_cpu_rmap;
#endif
	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];

#ifdef CONFIG_RFS_ACCEL
		if (rmap && bp->bnapi[i]->rx_ring) {
			rc = irq_cpu_rmap_add(rmap, irq->vector);
			if (rc)
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
			j++;
		}
#endif
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
		if (rc)
			break;

		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
		irq->requested = 1;

		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bp->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				break;
			}
		}
	}
	return rc;
}

static void bnxt_del_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++)
		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		__netif_napi_del(&bnapi->napi);
	}
	/* Since we called __netif_napi_del(), we need to respect an RCU
	 * grace period before freeing the napi structures.
	 */
	synchronize_net();
}

static void bnxt_init_napi(struct bnxt *bp)
{
	int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
	unsigned int cp_nr_rings = bp->cp_nr_rings;
	struct bnxt_napi *bnapi;
	int i;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		poll_fn = bnxt_poll_p5;
	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		cp_nr_rings--;
	for (i = 0; i < cp_nr_rings; i++) {
		bnapi = bp->bnapi[i];
		netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bnapi = bp->bnapi[cp_nr_rings];
		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
	}
}

static void bnxt_disable_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi ||
	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;

		cpr = &bnapi->cp_ring;
		if (bnapi->tx_fault)
			cpr->sw_stats->tx.tx_resets++;
		if (bnapi->in_reset)
			cpr->sw_stats->rx.rx_resets++;
		napi_disable(&bnapi->napi);
		if (bnapi->rx_ring)
			cancel_work_sync(&cpr->dim.work);
	}
}

static void bnxt_enable_napi(struct bnxt *bp)
{
	int i;

	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;

		bnapi->tx_fault = 0;

		cpr = &bnapi->cp_ring;
		bnapi->in_reset = false;

		if (bnapi->rx_ring) {
			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
		napi_enable(&bnapi->napi);
	}
}

void bnxt_tx_disable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	if (bp->tx_ring) {
		for (i = 0; i < bp->tx_nr_rings; i++) {
			txr = &bp->tx_ring[i];
			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
		}
	}
	/* Make sure napi polls see @dev_state change */
	synchronize_net();
	/* Drop carrier first to prevent TX timeout */
	netif_carrier_off(bp->dev);
	/* Stop all TX queues */
	netif_tx_disable(bp->dev);
}

void bnxt_tx_enable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		txr = &bp->tx_ring[i];
		WRITE_ONCE(txr->dev_state, 0);
	}
	/* Make sure napi polls see @dev_state change */
	synchronize_net();
	netif_tx_wake_all_queues(bp->dev);
	if (BNXT_LINK_IS_UP(bp))
		netif_carrier_on(bp->dev);
}

static char *bnxt_report_fec(struct bnxt_link_info *link_info)
{
	u8 active_fec = link_info->active_fec_sig_mode &
			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;

	switch (active_fec) {
	default:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		return "None";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		return "Clause 74 BaseR";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
		return "Clause 91 RS(528,514)";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
		return "Clause 91 RS544_1XN";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		return "Clause 91 RS(544,514)";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
		return "Clause 91 RS272_1XN";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		return "Clause 91 RS(272,257)";
	}
}

void bnxt_report_link(struct bnxt *bp)
{
	if (BNXT_LINK_IS_UP(bp)) {
		const char *signal = "";
		const char *flow_ctrl;
		const char *duplex;
		u32 speed;
		u16 fec;

		netif_carrier_on(bp->dev);
		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
		if (speed == SPEED_UNKNOWN) {
			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
			return;
		}
		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
			flow_ctrl = "ON - receive & transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
			flow_ctrl = "ON - transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
			flow_ctrl = "ON - receive";
		else
			flow_ctrl = "none";
		if (bp->link_info.phy_qcfg_resp.option_flags &
		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
			u8 sig_mode = bp->link_info.active_fec_sig_mode &
				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
			switch (sig_mode) {
			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
				signal = "(NRZ) ";
				break;
			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
				signal = "(PAM4 56Gbps) ";
				break;
			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
				signal = "(PAM4 112Gbps) ";
				break;
			default:
				break;
			}
		}
		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
			    speed, signal, duplex, flow_ctrl);
		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
			netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
							 "not active");
		fec = bp->link_info.fec_cfg;
		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
				    bnxt_report_fec(&bp->link_info));
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
{
	if (!resp->supported_speeds_auto_mode &&
	    !resp->supported_speeds_force_mode &&
	    !resp->supported_pam4_speeds_auto_mode &&
	    !resp->supported_pam4_speeds_force_mode &&
	    !resp->supported_speeds2_auto_mode &&
	    !resp->supported_speeds2_force_mode)
		return true;
	return false;
}

static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcaps_output *resp;
	struct hwrm_port_phy_qcaps_input *req;
	int rc = 0;

	if (bp->hwrm_spec_code < 0x10201)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto hwrm_phy_qcaps_exit;

	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
		struct ethtool_keee *eee = &bp->eee;
		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);

		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	}

	if (bp->hwrm_spec_code >= 0x10a01) {
		if (bnxt_phy_qcaps_no_speed(resp)) {
			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
			netdev_warn(bp->dev, "Ethernet link disabled\n");
		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
			netdev_info(bp->dev, "Ethernet link enabled\n");
			/* Phy re-enabled, reprobe the speeds */
			link_info->support_auto_speeds = 0;
			link_info->support_pam4_auto_speeds = 0;
			link_info->support_auto_speeds2 = 0;
		}
	}
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			le16_to_cpu(resp->supported_speeds_auto_mode);
	if (resp->supported_pam4_speeds_auto_mode)
		link_info->support_pam4_auto_speeds =
			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
	if (resp->supported_speeds2_auto_mode)
		link_info->support_auto_speeds2 =
			le16_to_cpu(resp->supported_speeds2_auto_mode);

	bp->port_count = resp->port_cnt;

hwrm_phy_qcaps_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

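/* Return true if @advertising contains any speed bit that is absent
 * from @supported.  OR-ing the XOR difference back into @supported
 * only changes it if a bit was set in @advertising alone.
 */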
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
	u16 diff = advertising ^ supported;

	return ((supported | diff) != supported);
}

static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	/* Check if any advertised speeds are no longer supported. The caller
	 * holds the link_lock mutex, so we can modify link_info settings.
	 */
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		if (bnxt_support_dropped(link_info->advertising,
					 link_info->support_auto_speeds2)) {
			link_info->advertising = link_info->support_auto_speeds2;
			return true;
		}
		return false;
	}
	if (bnxt_support_dropped(link_info->advertising,
				 link_info->support_auto_speeds)) {
		link_info->advertising = link_info->support_auto_speeds;
		return true;
	}
	if (bnxt_support_dropped(link_info->advertising_pam4,
				 link_info->support_pam4_auto_speeds)) {
		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
		return true;
	}
	return false;
}

int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp;
	struct hwrm_port_phy_qcfg_input *req;
	u8 link_state = link_info->link_state;
	bool support_changed;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
			rc = 0;
		}
		return rc;
	}

	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK) {
		link_info->link_speed = le16_to_cpu(resp->link_speed);
		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
			link_info->active_lanes = resp->active_lanes;
	} else {
		link_info->link_speed = 0;
		link_info->active_lanes = 0;
	}
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->force_pam4_link_speed =
		le16_to_cpu(resp->force_pam4_link_speed);
	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->auto_pam4_link_speeds =
		le16_to_cpu(resp->auto_pam4_link_speed_mask);
	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->lp_auto_pam4_link_speeds =
		resp->link_partner_pam4_adv_speeds;
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
		struct ethtool_keee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504) {
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
	}
	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_state = BNXT_LINK_STATE_UP;
		else
			link_info->link_state = BNXT_LINK_STATE_DOWN;
		if (link_state != link_info->link_state)
			bnxt_report_link(bp);
	} else {
		/* always report link down if not required to update
		 * the link state
		 */
		link_info->link_state = BNXT_LINK_STATE_DOWN;
	}
	hwrm_req_drop(bp, req);

	if (!BNXT_PHY_CFG_ABLE(bp))
		return 0;

	support_changed = bnxt_support_speed_dropped(link_info);
	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
		bnxt_hwrm_set_link_setting(bp, true, false);
	return 0;
}

static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
	} else {
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
		if (bp->hwrm_spec_code >= 0x10201) {
			req->auto_pause = req->force_pause;
			req->enables |= cpu_to_le32(
				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
		}
	}
}

static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
			req->enables |=
				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
		} else if (bp->link_info.advertising) {
			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
		}
		if (bp->link_info.advertising_pam4) {
			req->enables |=
				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
			req->auto_link_pam4_speed_mask =
				cpu_to_le16(bp->link_info.advertising_pam4);
		}
		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
				   (u32)bp->link_info.req_link_speed);
		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
		} else {
			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
		}
	}

	/* tell the ChiMP firmware that the setting takes effect immediately */
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}

int bnxt_hwrm_set_pause(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	bnxt_hwrm_set_pause_common(bp, req);

	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
	    bp->link_info.force_link_chng)
		bnxt_hwrm_set_link_common(bp, req);

	rc = hwrm_req_send(bp, req);
	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
		/* Since changing the pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon successful return of the phy_cfg command.
		 */
		bp->link_info.pause =
		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
		bp->link_info.auto_pause_setting = 0;
		if (!bp->link_info.force_link_chng)
			bnxt_report_link(bp);
	}
	bp->link_info.force_link_chng = false;
	return rc;
}

static void bnxt_hwrm_set_eee(struct bnxt *bp,
			      struct hwrm_port_phy_cfg_input *req)
{
	struct ethtool_keee *eee = &bp->eee;

	if (eee->eee_enabled) {
		u16 eee_speeds;
		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

		if (eee->tx_lpi_enabled)
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
		else
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

		req->flags |= cpu_to_le32(flags);
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
	}
}

int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	if (set_pause)
		bnxt_hwrm_set_pause_common(bp, req);

	bnxt_hwrm_set_link_common(bp, req);

	if (set_eee)
		bnxt_hwrm_set_eee(bp, req);
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev) &&
	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		mutex_lock(&bp->link_lock);
		/* The device is not obliged to bring the link down in certain
		 * scenarios, even when forced.  Setting the state to unknown
		 * is consistent with driver startup and will force the link
		 * state to be reported during subsequent open based on
		 * PORT_PHY_QCFG.
		 */
		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}

static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
#ifdef CONFIG_TEE_BNXT_FW
	int rc = tee_bnxt_fw_load();

	if (rc)
		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);

	return rc;
#else
	netdev_err(bp->dev, "OP-TEE not supported\n");
	return -ENODEV;
#endif
}

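/* Poll the firmware health register until the firmware leaves the
 * booting/recovering states, then report whether it came up healthy.
 * If the firmware crashed with no master function left to reset it,
 * fall back to a reset via OP-TEE.
 */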
static int bnxt_try_recover_fw(struct bnxt *bp)
{
	if (bp->fw_health && bp->fw_health->status_reliable) {
		int retry = 0, rc;
		u32 sts;

		do {
			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
			rc = bnxt_hwrm_poll(bp);
			if (!BNXT_FW_IS_BOOTING(sts) &&
			    !BNXT_FW_IS_RECOVERING(sts))
				break;
			retry++;
		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);

		if (!BNXT_FW_IS_HEALTHY(sts)) {
			netdev_err(bp->dev,
				   "Firmware not responding, status: 0x%x\n",
				   sts);
			rc = -ENODEV;
		}
		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
			return bnxt_fw_reset_via_optee(bp);
		}
		return rc;
	}

	return -ENODEV;
}

static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	if (!BNXT_NEW_RM(bp))
		return; /* no resource reservations required */

	hw_resc->resv_cp_rings = 0;
	hw_resc->resv_stat_ctxs = 0;
	hw_resc->resv_irqs = 0;
	hw_resc->resv_tx_rings = 0;
	hw_resc->resv_rx_rings = 0;
	hw_resc->resv_hw_ring_grps = 0;
	hw_resc->resv_vnics = 0;
	hw_resc->resv_rsscos_ctxs = 0;
	if (!fw_reset) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
}

int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
{
	int rc;

	if (!BNXT_NEW_RM(bp))
		return 0; /* no resource reservations required */

	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
	if (rc)
		netdev_err(bp->dev, "resc_qcaps failed\n");

	bnxt_clear_reservations(bp, fw_reset);

	return rc;
}

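/* Notify the firmware that the interface is going up or down with
 * HWRM_FUNC_DRV_IF_CHANGE.  On the way up, the response flags tell us
 * whether resources changed or the firmware was reset behind our back;
 * in the latter case the firmware state and interrupt mode must be
 * re-initialized before the open can proceed.
 */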
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
	struct hwrm_func_drv_if_change_output *resp;
	struct hwrm_func_drv_if_change_input *req;
	bool fw_reset = !bp->irq_tbl;
	bool resc_reinit = false;
	int rc, retry = 0;
	u32 flags = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
	if (rc)
		return rc;

	if (up)
		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
	resp = hwrm_req_hold(bp, req);

	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
	while (retry < BNXT_FW_IF_RETRY) {
		rc = hwrm_req_send(bp, req);
		if (rc != -EAGAIN)
			break;

		msleep(50);
		retry++;
	}

	if (rc == -EAGAIN) {
		hwrm_req_drop(bp, req);
		return rc;
	} else if (!rc) {
		flags = le32_to_cpu(resp->flags);
	} else if (up) {
		rc = bnxt_try_recover_fw(bp);
		fw_reset = true;
	}
	hwrm_req_drop(bp, req);
	if (rc)
		return rc;

	if (!up) {
		bnxt_inv_fw_health_reg(bp);
		return 0;
	}

	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
		resc_reinit = true;
	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
		fw_reset = true;
	else
		bnxt_remap_fw_health_regs(bp);

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
		return -ENODEV;
	}
	if (resc_reinit || fw_reset) {
		if (fw_reset) {
			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
				bnxt_ulp_irq_stop(bp);
			bnxt_free_ctx_mem(bp);
			bnxt_dcb_free(bp);
			rc = bnxt_fw_init_one(bp);
			if (rc) {
				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
				return rc;
			}
			bnxt_clear_int_mode(bp);
			rc = bnxt_init_int_mode(bp);
			if (rc) {
				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
				netdev_err(bp->dev, "init int mode failed\n");
				return rc;
			}
		}
		rc = bnxt_cancel_reservations(bp, fw_reset);
	}
	return rc;
}

static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp;
	struct hwrm_port_led_qcaps_input *req;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	bp->num_leds = 0;
	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}
	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
						 bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			__le16 caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}
	hwrm_req_drop(bp, req);
	return 0;
}

int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_alloc_output *resp;
	struct hwrm_wol_filter_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		bp->wol_filter_id = resp->wol_filter_id;
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_free_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
	req->wol_filter_id = bp->wol_filter_id;

	return hwrm_req_send(bp, req);
}

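/* Query the WoL filter at @handle and remember it if it is a magic
 * packet filter.  Returns the next handle so the caller can walk the
 * firmware's filter list until it gets 0 or 0xffff.
 */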
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
{
	struct hwrm_wol_filter_qcfg_output *resp;
	struct hwrm_wol_filter_qcfg_input *req;
	u16 next_handle = 0;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->handle = cpu_to_le16(handle);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		next_handle = le16_to_cpu(resp->next_handle);
		if (next_handle != 0) {
			if (resp->wol_type ==
			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
				bp->wol = 1;
				bp->wol_filter_id = resp->wol_filter_id;
			}
		}
	}
	hwrm_req_drop(bp, req);
	return next_handle;
}

static void bnxt_get_wol_settings(struct bnxt *bp)
{
	u16 handle = 0;

	bp->wol = 0;
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
		return;

	do {
		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
	} while (handle && handle != 0xffff);
}

static bool bnxt_eee_config_ok(struct bnxt *bp)
{
	struct ethtool_keee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return true;

	if (eee->eee_enabled) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);

		_bnxt_fw_to_linkmode(advertising, link_info->advertising);

		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			eee->eee_enabled = 0;
			return false;
		}
		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
			linkmode_and(eee->advertised, advertising,
				     eee->supported);
			return false;
		}
	}
	return true;
}

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	int rc;
	bool update_link = false;
	bool update_pause = false;
	bool update_eee = false;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_update_link(bp, true);
	if (rc) {
		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
			   rc);
		return rc;
	}
	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
	    link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    link_info->force_pause_setting != link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		if (BNXT_AUTO_MODE(link_info->auto_mode))
			update_link = true;
		if (bnxt_force_speed_updated(link_info))
			update_link = true;
		if (link_info->req_duplex != link_info->duplex_setting)
			update_link = true;
	} else {
		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
			update_link = true;
		if (bnxt_auto_speed_updated(link_info))
			update_link = true;
	}

	/* The last close may have shutdown the link, so need to call
	 * PHY_CFG to bring it back up.
	 */
	if (!BNXT_LINK_IS_UP(bp))
		update_link = true;

	if (!bnxt_eee_config_ok(bp))
		update_eee = true;

	if (update_link)
		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
	else if (update_pause)
		rc = bnxt_hwrm_set_pause(bp);
	if (rc) {
		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
			   rc);
		return rc;
	}

	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp);

static int bnxt_reinit_after_abort(struct bnxt *bp)
{
	int rc;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return -EBUSY;

	if (bp->dev->reg_state == NETREG_UNREGISTERED)
		return -ENODEV;

	rc = bnxt_fw_init_one(bp);
	if (!rc) {
		bnxt_clear_int_mode(bp);
		rc = bnxt_init_int_mode(bp);
		if (!rc) {
			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
		}
	}
	return rc;
}

static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
{
	struct bnxt_ntuple_filter *ntp_fltr;
	struct bnxt_l2_filter *l2_fltr;

	if (list_empty(&fltr->list))
		return;

	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
		atomic_inc(&l2_fltr->refcnt);
		ntp_fltr->l2_fltr = l2_fltr;
		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
			bnxt_del_ntp_filter(bp, ntp_fltr);
			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
				   fltr->sw_id);
		}
	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
			bnxt_del_l2_filter(bp, l2_fltr);
			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
				   fltr->sw_id);
		}
	}
}

static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
{
	struct bnxt_filter_base *usr_fltr, *tmp;

	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
}

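/* Build an XPS CPU map for the TX queues so that transmit queue
 * selection follows the same NUMA-local CPU spreading used for the IRQ
 * affinity hints set in bnxt_request_irq().
 */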
static int bnxt_set_xps_mapping(struct bnxt *bp)
{
	int numa_node = dev_to_node(&bp->pdev->dev);
	unsigned int q_idx, map_idx, cpu, i;
	const struct cpumask *cpu_mask_ptr;
	int nr_cpus = num_online_cpus();
	cpumask_t *q_map;
	int rc = 0;

	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
	if (!q_map)
		return -ENOMEM;

	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
	 * Each TC has the same number of TX queues. The nth TX queue for each
	 * TC will have the same CPU mask.
	 */
	for (i = 0; i < nr_cpus; i++) {
		map_idx = i % bp->tx_nr_rings_per_tc;
		cpu = cpumask_local_spread(i, numa_node);
		cpu_mask_ptr = get_cpu_mask(cpu);
		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
	}

	/* Register CPU mask for each TX queue except the ones marked for XDP */
	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
		map_idx = q_idx % bp->tx_nr_rings_per_tc;
		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
		if (rc) {
			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
				    q_idx);
			break;
		}
	}

	kfree(q_map);

	return rc;
}

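/* Bring the NIC up: reserve rings and vectors, allocate and initialize
 * all rings and VNICs, request IRQs and enable NAPI, then restore link,
 * tunnel ports, XPS, PTP and user filter state.  @irq_re_init and
 * @link_re_init allow a lighter reopen path that keeps the existing
 * IRQ and link configuration.
 */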
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	netif_carrier_off(bp->dev);
	if (irq_re_init) {
		/* Reserve rings now if none were reserved at driver probe. */
		rc = bnxt_init_dflt_ring_mode(bp);
		if (rc) {
			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
			return rc;
		}
	}
	rc = bnxt_reserve_rings(bp, irq_re_init);
	if (rc)
		return rc;

	rc = bnxt_alloc_mem(bp, irq_re_init);
	if (rc) {
		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
		goto open_err_free_mem;
	}

	if (irq_re_init) {
		bnxt_init_napi(bp);
		rc = bnxt_request_irq(bp);
		if (rc) {
			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
			goto open_err_irq;
		}
	}

	rc = bnxt_init_nic(bp, irq_re_init);
	if (rc) {
		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
		goto open_err_irq;
	}

	bnxt_enable_napi(bp);
	bnxt_debug_dev_init(bp);

	if (link_re_init) {
		mutex_lock(&bp->link_lock);
		rc = bnxt_update_phy_setting(bp);
		mutex_unlock(&bp->link_lock);
		if (rc) {
			netdev_warn(bp->dev, "failed to update phy settings\n");
			if (BNXT_SINGLE_PF(bp)) {
				bp->link_info.phy_retry = true;
				bp->link_info.phy_retry_expires =
					jiffies + 5 * HZ;
			}
		}
	}

	if (irq_re_init) {
		udp_tunnel_nic_reset_ntf(bp->dev);
		rc = bnxt_set_xps_mapping(bp);
		if (rc)
			netdev_warn(bp->dev, "failed to set xps mapping\n");
	}

	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
		if (!static_key_enabled(&bnxt_xdp_locking_key))
			static_branch_enable(&bnxt_xdp_locking_key);
	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
		static_branch_disable(&bnxt_xdp_locking_key);
	}
	set_bit(BNXT_STATE_OPEN, &bp->state);
	bnxt_enable_int(bp);
	/* Enable TX queues */
	bnxt_tx_enable(bp);
	mod_timer(&bp->timer, jiffies + bp->current_interval);
	/* Poll link status and check for SFP+ module status */
	mutex_lock(&bp->link_lock);
	bnxt_get_port_module_status(bp);
	mutex_unlock(&bp->link_lock);

	/* VF-reps may need to be re-opened after the PF is re-opened */
	if (BNXT_PF(bp))
		bnxt_vf_reps_open(bp);
	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
		WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
	bnxt_ptp_init_rtc(bp, true);
	bnxt_ptp_cfg_tstamp_filters(bp);
	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
	bnxt_cfg_usr_fltrs(bp);
	return 0;

open_err_irq:
	bnxt_del_napi(bp);

open_err_free_mem:
	bnxt_free_skbs(bp);
	bnxt_free_irq(bp);
	bnxt_free_mem(bp, true);
	return rc;
}

/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
		rc = -EIO;
	if (!rc)
		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
	if (rc) {
		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
		dev_close(bp->dev);
	}
	return rc;
}

/* rtnl_lock held, open the NIC half way by allocating all resources, but
 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
 * self tests.
 */
int bnxt_half_open_nic(struct bnxt *bp)
{
	int rc = 0;

	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
		rc = -ENODEV;
		goto half_open_err;
	}

	rc = bnxt_alloc_mem(bp, true);
	if (rc) {
		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
		goto half_open_err;
	}
	bnxt_init_napi(bp);
	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
	rc = bnxt_init_nic(bp, true);
	if (rc) {
		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
		bnxt_del_napi(bp);
		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
		goto half_open_err;
	}
	return 0;

half_open_err:
	bnxt_free_skbs(bp);
	bnxt_free_mem(bp, true);
	dev_close(bp->dev);
	return rc;
}

/* rtnl_lock held, this call can only be made after a previous successful
 * call to bnxt_half_open_nic().
 */
void bnxt_half_close_nic(struct bnxt *bp)
{
	bnxt_hwrm_resource_free(bp, false, true);
	bnxt_del_napi(bp);
	bnxt_free_skbs(bp);
	bnxt_free_mem(bp, true);
	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}

void bnxt_reenable_sriov(struct bnxt *bp)
{
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;
		int n = pf->active_vfs;

		if (n)
			bnxt_cfg_hw_sriov(bp, &n, true);
	}
}

static int bnxt_open(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
		rc = bnxt_reinit_after_abort(bp);
		if (rc) {
			if (rc == -EBUSY)
				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
			else
				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
			return -ENODEV;
		}
	}

	rc = bnxt_hwrm_if_change(bp, true);
	if (rc)
		return rc;

	rc = __bnxt_open_nic(bp, true, true);
	if (rc) {
		bnxt_hwrm_if_change(bp, false);
	} else {
		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
				bnxt_queue_sp_work(bp,
						   BNXT_RESTART_ULP_SP_EVENT);
		}
	}

	return rc;
}

static bool bnxt_drv_busy(struct bnxt *bp)
{
	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
		test_bit(BNXT_STATE_READ_STATS, &bp->state));
}

static void bnxt_get_ring_stats(struct bnxt *bp,
				struct rtnl_link_stats64 *stats);

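/* Tear down the data path: stop TX and NAPI, wait for in-flight stats
 * reads and service tasks to drain, free rings with the firmware, and
 * save the ring statistics so they survive across the close.
 */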
__bnxt_close_nic(struct bnxt * bp,bool irq_re_init,bool link_re_init)12300 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
12301 bool link_re_init)
12302 {
12303 /* Close the VF-reps before closing PF */
12304 if (BNXT_PF(bp))
12305 bnxt_vf_reps_close(bp);
12306
12307 /* Change device state to avoid TX queue wake up's */
12308 bnxt_tx_disable(bp);
12309
12310 clear_bit(BNXT_STATE_OPEN, &bp->state);
12311 smp_mb__after_atomic();
12312 while (bnxt_drv_busy(bp))
12313 msleep(20);
12314
12315 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12316 bnxt_clear_rss_ctxs(bp);
12317 /* Flush rings and disable interrupts */
12318 bnxt_shutdown_nic(bp, irq_re_init);
12319
12320 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
12321
12322 bnxt_debug_dev_exit(bp);
12323 bnxt_disable_napi(bp);
12324 del_timer_sync(&bp->timer);
12325 bnxt_free_skbs(bp);
12326
12327 /* Save ring stats before shutdown */
12328 if (bp->bnapi && irq_re_init) {
12329 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
12330 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
12331 }
12332 if (irq_re_init) {
12333 bnxt_free_irq(bp);
12334 bnxt_del_napi(bp);
12335 }
12336 bnxt_free_mem(bp, irq_re_init);
12337 }
12338
bnxt_close_nic(struct bnxt * bp,bool irq_re_init,bool link_re_init)12339 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12340 {
12341 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12342 /* If we get here, it means firmware reset is in progress
12343 * while we are trying to close. We can safely proceed with
12344 * the close because we are holding rtnl_lock(). Some firmware
12345 * messages may fail as we proceed to close. We set the
12346 * ABORT_ERR flag here so that the FW reset thread will later
12347 * abort when it gets the rtnl_lock() and sees the flag.
12348 */
12349 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
12350 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12351 }
12352
12353 #ifdef CONFIG_BNXT_SRIOV
12354 if (bp->sriov_cfg) {
12355 int rc;
12356
12357 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
12358 !bp->sriov_cfg,
12359 BNXT_SRIOV_CFG_WAIT_TMO);
12360 if (!rc)
12361 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
12362 else if (rc < 0)
12363 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
12364 }
12365 #endif
12366 __bnxt_close_nic(bp, irq_re_init, link_re_init);
12367 }
12368
12369 static int bnxt_close(struct net_device *dev)
12370 {
12371 struct bnxt *bp = netdev_priv(dev);
12372
12373 bnxt_close_nic(bp, true, true);
12374 bnxt_hwrm_shutdown_link(bp);
12375 bnxt_hwrm_if_change(bp, false);
12376 return 0;
12377 }
12378
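/* MDIO addressing note: a clause-45 phy_id is expected to pack the port
 * and device addresses (see linux/mdio.h), e.g. a sketch:
 *
 *	phy_id = mdio_phy_id_c45(prtad, devad);
 *
 * mdio_phy_id_is_c45() detects this encoding below, and the prtad/devad
 * fields are unpacked for the firmware request; clause 22 uses only the
 * low 5 register bits.
 */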
12379 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
12380 u16 *val)
12381 {
12382 struct hwrm_port_phy_mdio_read_output *resp;
12383 struct hwrm_port_phy_mdio_read_input *req;
12384 int rc;
12385
12386 if (bp->hwrm_spec_code < 0x10a00)
12387 return -EOPNOTSUPP;
12388
12389 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
12390 if (rc)
12391 return rc;
12392
12393 req->port_id = cpu_to_le16(bp->pf.port_id);
12394 req->phy_addr = phy_addr;
12395 req->reg_addr = cpu_to_le16(reg & 0x1f);
12396 if (mdio_phy_id_is_c45(phy_addr)) {
12397 req->cl45_mdio = 1;
12398 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12399 req->dev_addr = mdio_phy_id_devad(phy_addr);
12400 req->reg_addr = cpu_to_le16(reg);
12401 }
12402
12403 resp = hwrm_req_hold(bp, req);
12404 rc = hwrm_req_send(bp, req);
12405 if (!rc)
12406 *val = le16_to_cpu(resp->reg_data);
12407 hwrm_req_drop(bp, req);
12408 return rc;
12409 }
12410
12411 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12412 u16 val)
12413 {
12414 struct hwrm_port_phy_mdio_write_input *req;
12415 int rc;
12416
12417 if (bp->hwrm_spec_code < 0x10a00)
12418 return -EOPNOTSUPP;
12419
12420 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12421 if (rc)
12422 return rc;
12423
12424 req->port_id = cpu_to_le16(bp->pf.port_id);
12425 req->phy_addr = phy_addr;
12426 req->reg_addr = cpu_to_le16(reg & 0x1f);
12427 if (mdio_phy_id_is_c45(phy_addr)) {
12428 req->cl45_mdio = 1;
12429 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12430 req->dev_addr = mdio_phy_id_devad(phy_addr);
12431 req->reg_addr = cpu_to_le16(reg);
12432 }
12433 req->reg_data = cpu_to_le16(val);
12434
12435 return hwrm_req_send(bp, req);
12436 }
12437
12438 /* rtnl_lock held */
12439 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12440 {
12441 struct mii_ioctl_data *mdio = if_mii(ifr);
12442 struct bnxt *bp = netdev_priv(dev);
12443 int rc;
12444
12445 switch (cmd) {
12446 case SIOCGMIIPHY:
12447 mdio->phy_id = bp->link_info.phy_addr;
12448
12449 fallthrough;
12450 case SIOCGMIIREG: {
12451 u16 mii_regval = 0;
12452
12453 if (!netif_running(dev))
12454 return -EAGAIN;
12455
12456 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12457 &mii_regval);
12458 mdio->val_out = mii_regval;
12459 return rc;
12460 }
12461
12462 case SIOCSMIIREG:
12463 if (!netif_running(dev))
12464 return -EAGAIN;
12465
12466 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12467 mdio->val_in);
12468
12469 case SIOCSHWTSTAMP:
12470 return bnxt_hwtstamp_set(dev, ifr);
12471
12472 case SIOCGHWTSTAMP:
12473 return bnxt_hwtstamp_get(dev, ifr);
12474
12475 default:
12476 /* do nothing */
12477 break;
12478 }
12479 return -EOPNOTSUPP;
12480 }
12481
12482 static void bnxt_get_ring_stats(struct bnxt *bp,
12483 struct rtnl_link_stats64 *stats)
12484 {
12485 int i;
12486
12487 for (i = 0; i < bp->cp_nr_rings; i++) {
12488 struct bnxt_napi *bnapi = bp->bnapi[i];
12489 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12490 u64 *sw = cpr->stats.sw_stats;
12491
12492 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12493 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12494 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12495
12496 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12497 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12498 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12499
12500 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12501 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12502 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12503
12504 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12505 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12506 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12507
12508 stats->rx_missed_errors +=
12509 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12510
12511 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12512
12513 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12514
12515 stats->rx_dropped +=
12516 cpr->sw_stats->rx.rx_netpoll_discards +
12517 cpr->sw_stats->rx.rx_oom_discards;
12518 }
12519 }
12520
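/* Per-ring HW counters are lost when the rings are freed on close, so the
 * totals saved in net_stats_prev by __bnxt_close_nic() are folded back in
 * here to keep the netdev counters monotonic across open/close cycles.
 */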
12521 static void bnxt_add_prev_stats(struct bnxt *bp,
12522 struct rtnl_link_stats64 *stats)
12523 {
12524 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12525
12526 stats->rx_packets += prev_stats->rx_packets;
12527 stats->tx_packets += prev_stats->tx_packets;
12528 stats->rx_bytes += prev_stats->rx_bytes;
12529 stats->tx_bytes += prev_stats->tx_bytes;
12530 stats->rx_missed_errors += prev_stats->rx_missed_errors;
12531 stats->multicast += prev_stats->multicast;
12532 stats->rx_dropped += prev_stats->rx_dropped;
12533 stats->tx_dropped += prev_stats->tx_dropped;
12534 }
12535
12536 static void
12537 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12538 {
12539 struct bnxt *bp = netdev_priv(dev);
12540
12541 set_bit(BNXT_STATE_READ_STATS, &bp->state);
12542 /* Make sure bnxt_close_nic() sees that we are reading stats before
12543 * we check the BNXT_STATE_OPEN flag.
12544 */
12545 smp_mb__after_atomic();
12546 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12547 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12548 *stats = bp->net_stats_prev;
12549 return;
12550 }
12551
12552 bnxt_get_ring_stats(bp, stats);
12553 bnxt_add_prev_stats(bp, stats);
12554
12555 if (bp->flags & BNXT_FLAG_PORT_STATS) {
12556 u64 *rx = bp->port_stats.sw_stats;
12557 u64 *tx = bp->port_stats.sw_stats +
12558 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12559
12560 stats->rx_crc_errors =
12561 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12562 stats->rx_frame_errors =
12563 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12564 stats->rx_length_errors =
12565 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12566 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12567 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12568 stats->rx_errors =
12569 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12570 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12571 stats->collisions =
12572 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12573 stats->tx_fifo_errors =
12574 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12575 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12576 }
12577 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12578 }
12579
12580 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12581 struct bnxt_total_ring_err_stats *stats,
12582 struct bnxt_cp_ring_info *cpr)
12583 {
12584 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12585 u64 *hw_stats = cpr->stats.sw_stats;
12586
12587 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12588 stats->rx_total_resets += sw_stats->rx.rx_resets;
12589 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
12590 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
12591 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
12592 stats->rx_total_ring_discards +=
12593 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
12594 stats->tx_total_resets += sw_stats->tx.tx_resets;
12595 stats->tx_total_ring_discards +=
12596 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
12597 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
12598 }
12599
12600 void bnxt_get_ring_err_stats(struct bnxt *bp,
12601 struct bnxt_total_ring_err_stats *stats)
12602 {
12603 int i;
12604
12605 for (i = 0; i < bp->cp_nr_rings; i++)
12606 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
12607 }
12608
12609 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
12610 {
12611 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12612 struct net_device *dev = bp->dev;
12613 struct netdev_hw_addr *ha;
12614 u8 *haddr;
12615 int mc_count = 0;
12616 bool update = false;
12617 int off = 0;
12618
12619 netdev_for_each_mc_addr(ha, dev) {
12620 if (mc_count >= BNXT_MAX_MC_ADDRS) {
12621 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12622 vnic->mc_list_count = 0;
12623 return false;
12624 }
12625 haddr = ha->addr;
12626 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
12627 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
12628 update = true;
12629 }
12630 off += ETH_ALEN;
12631 mc_count++;
12632 }
12633 if (mc_count)
12634 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12635
12636 if (mc_count != vnic->mc_list_count) {
12637 vnic->mc_list_count = mc_count;
12638 update = true;
12639 }
12640 return update;
12641 }
12642
12643 static bool bnxt_uc_list_updated(struct bnxt *bp)
12644 {
12645 struct net_device *dev = bp->dev;
12646 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12647 struct netdev_hw_addr *ha;
12648 int off = 0;
12649
12650 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
12651 return true;
12652
12653 netdev_for_each_uc_addr(ha, dev) {
12654 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
12655 return true;
12656
12657 off += ETH_ALEN;
12658 }
12659 return false;
12660 }
12661
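/* ndo_set_rx_mode is called in atomic context with the address list lock
 * held, while programming filters needs sleeping HWRM calls; so this
 * handler only recomputes the mask and defers the actual update to
 * bnxt_cfg_rx_mode() via BNXT_RX_MASK_SP_EVENT.
 */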
12662 static void bnxt_set_rx_mode(struct net_device *dev)
12663 {
12664 struct bnxt *bp = netdev_priv(dev);
12665 struct bnxt_vnic_info *vnic;
12666 bool mc_update = false;
12667 bool uc_update;
12668 u32 mask;
12669
12670 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12671 return;
12672
12673 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12674 mask = vnic->rx_mask;
12675 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
12676 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
12677 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
12678 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
12679
12680 if (dev->flags & IFF_PROMISC)
12681 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12682
12683 uc_update = bnxt_uc_list_updated(bp);
12684
12685 if (dev->flags & IFF_BROADCAST)
12686 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
12687 if (dev->flags & IFF_ALLMULTI) {
12688 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12689 vnic->mc_list_count = 0;
12690 } else if (dev->flags & IFF_MULTICAST) {
12691 mc_update = bnxt_mc_list_updated(bp, &mask);
12692 }
12693
12694 if (mask != vnic->rx_mask || uc_update || mc_update) {
12695 vnic->rx_mask = mask;
12696
12697 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12698 }
12699 }
12700
12701 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12702 {
12703 struct net_device *dev = bp->dev;
12704 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12705 struct netdev_hw_addr *ha;
12706 int i, off = 0, rc;
12707 bool uc_update;
12708
12709 netif_addr_lock_bh(dev);
12710 uc_update = bnxt_uc_list_updated(bp);
12711 netif_addr_unlock_bh(dev);
12712
12713 if (!uc_update)
12714 goto skip_uc;
12715
12716 for (i = 1; i < vnic->uc_filter_count; i++) {
12717 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
12718
12719 bnxt_hwrm_l2_filter_free(bp, fltr);
12720 bnxt_del_l2_filter(bp, fltr);
12721 }
12722
12723 vnic->uc_filter_count = 1;
12724
12725 netif_addr_lock_bh(dev);
12726 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
12727 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12728 } else {
12729 netdev_for_each_uc_addr(ha, dev) {
12730 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
12731 off += ETH_ALEN;
12732 vnic->uc_filter_count++;
12733 }
12734 }
12735 netif_addr_unlock_bh(dev);
12736
12737 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
12738 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12739 if (rc) {
12740 if (BNXT_VF(bp) && rc == -ENODEV) {
12741 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12742 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12743 else
12744 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12745 rc = 0;
12746 } else {
12747 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12748 }
12749 vnic->uc_filter_count = i;
12750 return rc;
12751 }
12752 }
12753 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12754 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12755
12756 skip_uc:
12757 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
12758 !bnxt_promisc_ok(bp))
12759 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12760 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12761 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12762 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12763 rc);
12764 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12765 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12766 vnic->mc_list_count = 0;
12767 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12768 }
12769 if (rc)
12770 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12771 rc);
12772
12773 return rc;
12774 }
12775
12776 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12777 {
12778 #ifdef CONFIG_BNXT_SRIOV
12779 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12780 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12781
12782 /* No minimum rings were provisioned by the PF. Don't
12783 * reserve rings by default when device is down.
12784 */
12785 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
12786 return true;
12787
12788 if (!netif_running(bp->dev))
12789 return false;
12790 }
12791 #endif
12792 return true;
12793 }
12794
12795 /* Whether the chip and firmware support RFS */
12796 static bool bnxt_rfs_supported(struct bnxt *bp)
12797 {
12798 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12799 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12800 return true;
12801 return false;
12802 }
12803 /* 212 firmware is broken for aRFS */
12804 if (BNXT_FW_MAJ(bp) == 212)
12805 return false;
12806 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12807 return true;
12808 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12809 return true;
12810 return false;
12811 }
12812
12813 /* Whether runtime conditions support RFS */
12814 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
12815 {
12816 struct bnxt_hw_rings hwr = {0};
12817 int max_vnics, max_rss_ctxs;
12818
12819 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
12820 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
12821 return bnxt_rfs_supported(bp);
12822
12823 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12824 return false;
12825
12826 hwr.grp = bp->rx_nr_rings;
12827 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
12828 if (new_rss_ctx)
12829 hwr.vnic++;
12830 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
12831 max_vnics = bnxt_get_max_func_vnics(bp);
12832 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12833
12834 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
12835 if (bp->rx_nr_rings > 1)
12836 netdev_warn(bp->dev,
12837 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
12838 min(max_rss_ctxs - 1, max_vnics - 1));
12839 return false;
12840 }
12841
12842 if (!BNXT_NEW_RM(bp))
12843 return true;
12844
12845 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
12846 * issue that will mess up the default VNIC if we reduce the
12847 * reservations.
12848 */
12849 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12850 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12851 return true;
12852
12853 bnxt_hwrm_reserve_rings(bp, &hwr);
12854 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12855 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12856 return true;
12857
12858 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12859 hwr.vnic = 1;
12860 hwr.rss_ctx = 0;
12861 bnxt_hwrm_reserve_rings(bp, &hwr);
12862 return false;
12863 }
12864
12865 static netdev_features_t bnxt_fix_features(struct net_device *dev,
12866 netdev_features_t features)
12867 {
12868 struct bnxt *bp = netdev_priv(dev);
12869 netdev_features_t vlan_features;
12870
12871 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
12872 features &= ~NETIF_F_NTUPLE;
12873
12874 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12875 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12876
12877 if (!(features & NETIF_F_GRO))
12878 features &= ~NETIF_F_GRO_HW;
12879
12880 if (features & NETIF_F_GRO_HW)
12881 features &= ~NETIF_F_LRO;
12882
12883 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
12884 * turned on or off together.
12885 */
12886 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
12887 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
12888 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12889 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12890 else if (vlan_features)
12891 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12892 }
12893 #ifdef CONFIG_BNXT_SRIOV
12894 if (BNXT_VF(bp) && bp->vf.vlan)
12895 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12896 #endif
12897 return features;
12898 }
12899
12900 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
12901 bool link_re_init, u32 flags, bool update_tpa)
12902 {
12903 bnxt_close_nic(bp, irq_re_init, link_re_init);
12904 bp->flags = flags;
12905 if (update_tpa)
12906 bnxt_set_ring_params(bp);
12907 return bnxt_open_nic(bp, irq_re_init, link_re_init);
12908 }
12909
12910 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
12911 {
12912 bool update_tpa = false, update_ntuple = false;
12913 struct bnxt *bp = netdev_priv(dev);
12914 u32 flags = bp->flags;
12915 u32 changes;
12916 int rc = 0;
12917 bool re_init = false;
12918
12919 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
12920 if (features & NETIF_F_GRO_HW)
12921 flags |= BNXT_FLAG_GRO;
12922 else if (features & NETIF_F_LRO)
12923 flags |= BNXT_FLAG_LRO;
12924
12925 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12926 flags &= ~BNXT_FLAG_TPA;
12927
12928 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12929 flags |= BNXT_FLAG_STRIP_VLAN;
12930
12931 if (features & NETIF_F_NTUPLE)
12932 flags |= BNXT_FLAG_RFS;
12933 else
12934 bnxt_clear_usr_fltrs(bp, true);
12935
12936 changes = flags ^ bp->flags;
12937 if (changes & BNXT_FLAG_TPA) {
12938 update_tpa = true;
12939 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12940 (flags & BNXT_FLAG_TPA) == 0 ||
12941 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12942 re_init = true;
12943 }
12944
12945 if (changes & ~BNXT_FLAG_TPA)
12946 re_init = true;
12947
12948 if (changes & BNXT_FLAG_RFS)
12949 update_ntuple = true;
12950
12951 if (flags != bp->flags) {
12952 u32 old_flags = bp->flags;
12953
12954 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12955 bp->flags = flags;
12956 if (update_tpa)
12957 bnxt_set_ring_params(bp);
12958 return rc;
12959 }
12960
12961 if (update_ntuple)
12962 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
12963
12964 if (re_init)
12965 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
12966
12967 if (update_tpa) {
12968 bp->flags = flags;
12969 rc = bnxt_set_tpa(bp,
12970 (flags & BNXT_FLAG_TPA) ?
12971 true : false);
12972 if (rc)
12973 bp->flags = old_flags;
12974 }
12975 }
12976 return rc;
12977 }
12978
12979 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12980 u8 **nextp)
12981 {
12982 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
12983 struct hop_jumbo_hdr *jhdr;
12984 int hdr_count = 0;
12985 u8 *nexthdr;
12986 int start;
12987
12988 /* Check that there are at most 2 IPv6 extension headers, no
12989 * fragment header, and each is <= 64 bytes.
12990 */
12991 start = nw_off + sizeof(*ip6h);
12992 nexthdr = &ip6h->nexthdr;
12993 while (ipv6_ext_hdr(*nexthdr)) {
12994 struct ipv6_opt_hdr *hp;
12995 int hdrlen;
12996
12997 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
12998 *nexthdr == NEXTHDR_FRAGMENT)
12999 return false;
13000 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13001 skb_headlen(skb), NULL);
13002 if (!hp)
13003 return false;
13004 if (*nexthdr == NEXTHDR_AUTH)
13005 hdrlen = ipv6_authlen(hp);
13006 else
13007 hdrlen = ipv6_optlen(hp);
13008
13009 if (hdrlen > 64)
13010 return false;
13011
13012 /* The ext header may be a hop-by-hop header inserted for
13013 * big TCP purposes. This will be removed before sending
13014 * from NIC, so do not count it.
13015 */
13016 if (*nexthdr == NEXTHDR_HOP) {
13017 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13018 goto increment_hdr;
13019
13020 jhdr = (struct hop_jumbo_hdr *)hp;
13021 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13022 jhdr->nexthdr != IPPROTO_TCP)
13023 goto increment_hdr;
13024
13025 goto next_hdr;
13026 }
13027 increment_hdr:
13028 hdr_count++;
13029 next_hdr:
13030 nexthdr = &hp->nexthdr;
13031 start += hdrlen;
13032 }
13033 if (nextp) {
13034 /* Caller will check inner protocol */
13035 if (skb->encapsulation) {
13036 *nextp = nexthdr;
13037 return true;
13038 }
13039 *nextp = NULL;
13040 }
13041 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13042 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13043 }
13044
13045 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13046 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13047 {
13048 struct udphdr *uh = udp_hdr(skb);
13049 __be16 udp_port = uh->dest;
13050
13051 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13052 udp_port != bp->vxlan_gpe_port)
13053 return false;
13054 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13055 struct ethhdr *eh = inner_eth_hdr(skb);
13056
13057 switch (eh->h_proto) {
13058 case htons(ETH_P_IP):
13059 return true;
13060 case htons(ETH_P_IPV6):
13061 return bnxt_exthdr_check(bp, skb,
13062 skb_inner_network_offset(skb),
13063 NULL);
13064 }
13065 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13066 return true;
13067 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13068 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13069 NULL);
13070 }
13071 return false;
13072 }
13073
13074 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13075 {
13076 switch (l4_proto) {
13077 case IPPROTO_UDP:
13078 return bnxt_udp_tunl_check(bp, skb);
13079 case IPPROTO_IPIP:
13080 return true;
13081 case IPPROTO_GRE: {
13082 switch (skb->inner_protocol) {
13083 default:
13084 return false;
13085 case htons(ETH_P_IP):
13086 return true;
13087 case htons(ETH_P_IPV6):
13088 fallthrough;
13089 }
13090 }
13091 case IPPROTO_IPV6:
13092 /* Check ext headers of inner ipv6 */
13093 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13094 NULL);
13095 }
13096 return false;
13097 }
13098
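/* ndo_features_check: checksum/TSO offload is only left enabled for
 * encapsulated frames the NIC can parse (VXLAN/Geneve/VXLAN-GPE on the
 * configured UDP ports, GRE, IP-in-IP); anything else is pushed back to
 * software by clearing NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK.
 */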
13099 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13100 struct net_device *dev,
13101 netdev_features_t features)
13102 {
13103 struct bnxt *bp = netdev_priv(dev);
13104 u8 *l4_proto;
13105
13106 features = vlan_features_check(skb, features);
13107 switch (vlan_get_protocol(skb)) {
13108 case htons(ETH_P_IP):
13109 if (!skb->encapsulation)
13110 return features;
13111 l4_proto = &ip_hdr(skb)->protocol;
13112 if (bnxt_tunl_check(bp, skb, *l4_proto))
13113 return features;
13114 break;
13115 case htons(ETH_P_IPV6):
13116 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13117 &l4_proto))
13118 break;
13119 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13120 return features;
13121 break;
13122 }
13123 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13124 }
13125
13126 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13127 u32 *reg_buf)
13128 {
13129 struct hwrm_dbg_read_direct_output *resp;
13130 struct hwrm_dbg_read_direct_input *req;
13131 __le32 *dbg_reg_buf;
13132 dma_addr_t mapping;
13133 int rc, i;
13134
13135 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13136 if (rc)
13137 return rc;
13138
13139 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13140 &mapping);
13141 if (!dbg_reg_buf) {
13142 rc = -ENOMEM;
13143 goto dbg_rd_reg_exit;
13144 }
13145
13146 req->host_dest_addr = cpu_to_le64(mapping);
13147
13148 resp = hwrm_req_hold(bp, req);
13149 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13150 req->read_len32 = cpu_to_le32(num_words);
13151
13152 rc = hwrm_req_send(bp, req);
13153 if (rc || resp->error_code) {
13154 rc = -EIO;
13155 goto dbg_rd_reg_exit;
13156 }
13157 for (i = 0; i < num_words; i++)
13158 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13159
13160 dbg_rd_reg_exit:
13161 hwrm_req_drop(bp, req);
13162 return rc;
13163 }
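
/* Example use (a sketch; the register offset is illustrative only):
 *
 *	u32 buf[4];
 *
 *	if (!bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, buf))
 *		pr_debug("%08x %08x %08x %08x\n",
 *			 buf[0], buf[1], buf[2], buf[3]);
 */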
13164
13165 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13166 u32 ring_id, u32 *prod, u32 *cons)
13167 {
13168 struct hwrm_dbg_ring_info_get_output *resp;
13169 struct hwrm_dbg_ring_info_get_input *req;
13170 int rc;
13171
13172 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13173 if (rc)
13174 return rc;
13175
13176 req->ring_type = ring_type;
13177 req->fw_ring_id = cpu_to_le32(ring_id);
13178 resp = hwrm_req_hold(bp, req);
13179 rc = hwrm_req_send(bp, req);
13180 if (!rc) {
13181 *prod = le32_to_cpu(resp->producer_index);
13182 *cons = le32_to_cpu(resp->consumer_index);
13183 }
13184 hwrm_req_drop(bp, req);
13185 return rc;
13186 }
13187
13188 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13189 {
13190 struct bnxt_tx_ring_info *txr;
13191 int i = bnapi->index, j;
13192
13193 bnxt_for_each_napi_tx(j, bnapi, txr)
13194 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13195 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13196 txr->tx_cons);
13197 }
13198
13199 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13200 {
13201 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13202 int i = bnapi->index;
13203
13204 if (!rxr)
13205 return;
13206
13207 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13208 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13209 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13210 rxr->rx_sw_agg_prod);
13211 }
13212
13213 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13214 {
13215 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13216 int i = bnapi->index;
13217
13218 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13219 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13220 }
13221
13222 static void bnxt_dbg_dump_states(struct bnxt *bp)
13223 {
13224 int i;
13225 struct bnxt_napi *bnapi;
13226
13227 for (i = 0; i < bp->cp_nr_rings; i++) {
13228 bnapi = bp->bnapi[i];
13229 if (netif_msg_drv(bp)) {
13230 bnxt_dump_tx_sw_state(bnapi);
13231 bnxt_dump_rx_sw_state(bnapi);
13232 bnxt_dump_cp_sw_state(bnapi);
13233 }
13234 }
13235 }
13236
13237 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13238 {
13239 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13240 struct hwrm_ring_reset_input *req;
13241 struct bnxt_napi *bnapi = rxr->bnapi;
13242 struct bnxt_cp_ring_info *cpr;
13243 u16 cp_ring_id;
13244 int rc;
13245
13246 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
13247 if (rc)
13248 return rc;
13249
13250 cpr = &bnapi->cp_ring;
13251 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13252 req->cmpl_ring = cpu_to_le16(cp_ring_id);
13253 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
13254 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
13255 return hwrm_req_send_silent(bp, req);
13256 }
13257
13258 static void bnxt_reset_task(struct bnxt *bp, bool silent)
13259 {
13260 if (!silent)
13261 bnxt_dbg_dump_states(bp);
13262 if (netif_running(bp->dev)) {
13263 bnxt_close_nic(bp, !silent, false);
13264 bnxt_open_nic(bp, !silent, false);
13265 }
13266 }
13267
13268 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
13269 {
13270 struct bnxt *bp = netdev_priv(dev);
13271
13272 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
13273 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
13274 }
13275
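/* Polled from bnxt_timer(): firmware is healthy while the heartbeat
 * register keeps advancing and the reset counter stays constant. A stuck
 * heartbeat is counted as an "arrest", an unexpected reset-counter change
 * as a "discovery"; either one queues BNXT_FW_EXCEPTION_SP_EVENT.
 */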
13276 static void bnxt_fw_health_check(struct bnxt *bp)
13277 {
13278 struct bnxt_fw_health *fw_health = bp->fw_health;
13279 struct pci_dev *pdev = bp->pdev;
13280 u32 val;
13281
13282 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13283 return;
13284
13285 /* Make sure it is enabled before checking the tmr_counter. */
13286 smp_rmb();
13287 if (fw_health->tmr_counter) {
13288 fw_health->tmr_counter--;
13289 return;
13290 }
13291
13292 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13293 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
13294 fw_health->arrests++;
13295 goto fw_reset;
13296 }
13297
13298 fw_health->last_fw_heartbeat = val;
13299
13300 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13301 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
13302 fw_health->discoveries++;
13303 goto fw_reset;
13304 }
13305
13306 fw_health->tmr_counter = fw_health->tmr_multiplier;
13307 return;
13308
13309 fw_reset:
13310 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
13311 }
13312
13313 static void bnxt_timer(struct timer_list *t)
13314 {
13315 struct bnxt *bp = from_timer(bp, t, timer);
13316 struct net_device *dev = bp->dev;
13317
13318 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
13319 return;
13320
13321 if (atomic_read(&bp->intr_sem) != 0)
13322 goto bnxt_restart_timer;
13323
13324 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
13325 bnxt_fw_health_check(bp);
13326
13327 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
13328 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
13329
13330 if (bnxt_tc_flower_enabled(bp))
13331 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
13332
13333 #ifdef CONFIG_RFS_ACCEL
13334 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
13335 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13336 #endif /*CONFIG_RFS_ACCEL*/
13337
13338 if (bp->link_info.phy_retry) {
13339 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
13340 bp->link_info.phy_retry = false;
13341 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
13342 } else {
13343 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
13344 }
13345 }
13346
13347 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13348 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13349
13350 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
13351 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
13352
13353 bnxt_restart_timer:
13354 mod_timer(&bp->timer, jiffies + bp->current_interval);
13355 }
13356
13357 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
13358 {
13359 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
13360 * set. If the device is being closed, bnxt_close() may be holding
13361 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
13362 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
13363 */
13364 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13365 rtnl_lock();
13366 }
13367
13368 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
13369 {
13370 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13371 rtnl_unlock();
13372 }
13373
13374 /* Only called from bnxt_sp_task() */
13375 static void bnxt_reset(struct bnxt *bp, bool silent)
13376 {
13377 bnxt_rtnl_lock_sp(bp);
13378 if (test_bit(BNXT_STATE_OPEN, &bp->state))
13379 bnxt_reset_task(bp, silent);
13380 bnxt_rtnl_unlock_sp(bp);
13381 }
13382
13383 /* Only called from bnxt_sp_task() */
13384 static void bnxt_rx_ring_reset(struct bnxt *bp)
13385 {
13386 int i;
13387
13388 bnxt_rtnl_lock_sp(bp);
13389 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13390 bnxt_rtnl_unlock_sp(bp);
13391 return;
13392 }
13393 /* Disable and flush TPA before resetting the RX ring */
13394 if (bp->flags & BNXT_FLAG_TPA)
13395 bnxt_set_tpa(bp, false);
13396 for (i = 0; i < bp->rx_nr_rings; i++) {
13397 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13398 struct bnxt_cp_ring_info *cpr;
13399 int rc;
13400
13401 if (!rxr->bnapi->in_reset)
13402 continue;
13403
13404 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13405 if (rc) {
13406 if (rc == -EINVAL || rc == -EOPNOTSUPP)
13407 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13408 else
13409 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13410 rc);
13411 bnxt_reset_task(bp, true);
13412 break;
13413 }
13414 bnxt_free_one_rx_ring_skbs(bp, i);
13415 rxr->rx_prod = 0;
13416 rxr->rx_agg_prod = 0;
13417 rxr->rx_sw_agg_prod = 0;
13418 rxr->rx_next_cons = 0;
13419 rxr->bnapi->in_reset = false;
13420 bnxt_alloc_one_rx_ring(bp, i);
13421 cpr = &rxr->bnapi->cp_ring;
13422 cpr->sw_stats->rx.rx_resets++;
13423 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13424 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13425 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13426 }
13427 if (bp->flags & BNXT_FLAG_TPA)
13428 bnxt_set_tpa(bp, true);
13429 bnxt_rtnl_unlock_sp(bp);
13430 }
13431
13432 static void bnxt_fw_fatal_close(struct bnxt *bp)
13433 {
13434 bnxt_tx_disable(bp);
13435 bnxt_disable_napi(bp);
13436 bnxt_disable_int_sync(bp);
13437 bnxt_free_irq(bp);
13438 bnxt_clear_int_mode(bp);
13439 pci_disable_device(bp->pdev);
13440 }
13441
13442 static void bnxt_fw_reset_close(struct bnxt *bp)
13443 {
13444 /* When firmware is in fatal state, quiesce device and disable
13445 * bus master to prevent any potential bad DMAs before freeing
13446 * kernel memory.
13447 */
13448 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13449 u16 val = 0;
13450
13451 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13452 if (val == 0xffff)
13453 bp->fw_reset_min_dsecs = 0;
13454 bnxt_fw_fatal_close(bp);
13455 }
13456 __bnxt_close_nic(bp, true, false);
13457 bnxt_vf_reps_free(bp);
13458 bnxt_clear_int_mode(bp);
13459 bnxt_hwrm_func_drv_unrgtr(bp);
13460 if (pci_is_enabled(bp->pdev))
13461 pci_disable_device(bp->pdev);
13462 bnxt_free_ctx_mem(bp);
13463 }
13464
13465 static bool is_bnxt_fw_ok(struct bnxt *bp)
13466 {
13467 struct bnxt_fw_health *fw_health = bp->fw_health;
13468 bool no_heartbeat = false, has_reset = false;
13469 u32 val;
13470
13471 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13472 if (val == fw_health->last_fw_heartbeat)
13473 no_heartbeat = true;
13474
13475 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13476 if (val != fw_health->last_fw_reset_cnt)
13477 has_reset = true;
13478
13479 if (!no_heartbeat && has_reset)
13480 return true;
13481
13482 return false;
13483 }
13484
13485 /* rtnl_lock is acquired before calling this function */
13486 static void bnxt_force_fw_reset(struct bnxt *bp)
13487 {
13488 struct bnxt_fw_health *fw_health = bp->fw_health;
13489 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13490 u32 wait_dsecs;
13491
13492 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13493 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13494 return;
13495
13496 if (ptp) {
13497 spin_lock_bh(&ptp->ptp_lock);
13498 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13499 spin_unlock_bh(&ptp->ptp_lock);
13500 } else {
13501 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13502 }
13503 bnxt_fw_reset_close(bp);
13504 wait_dsecs = fw_health->master_func_wait_dsecs;
13505 if (fw_health->primary) {
13506 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13507 wait_dsecs = 0;
13508 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13509 } else {
13510 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13511 wait_dsecs = fw_health->normal_func_wait_dsecs;
13512 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13513 }
13514
13515 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13516 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13517 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13518 }
13519
13520 void bnxt_fw_exception(struct bnxt *bp)
13521 {
13522 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13523 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13524 bnxt_ulp_stop(bp);
13525 bnxt_rtnl_lock_sp(bp);
13526 bnxt_force_fw_reset(bp);
13527 bnxt_rtnl_unlock_sp(bp);
13528 }
13529
13530 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13531 * < 0 on error.
13532 */
13533 static int bnxt_get_registered_vfs(struct bnxt *bp)
13534 {
13535 #ifdef CONFIG_BNXT_SRIOV
13536 int rc;
13537
13538 if (!BNXT_PF(bp))
13539 return 0;
13540
13541 rc = bnxt_hwrm_func_qcfg(bp);
13542 if (rc) {
13543 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13544 return rc;
13545 }
13546 if (bp->pf.registered_vfs)
13547 return bp->pf.registered_vfs;
13548 if (bp->sriov_cfg)
13549 return 1;
13550 #endif
13551 return 0;
13552 }
13553
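/* Orchestrates a non-fatal firmware reset: take the IN_FW_RESET state
 * (under the PTP lock if PTP is active), optionally poll for VFs to
 * unregister, close the NIC, then hand off to the fw-reset state machine
 * with an appropriate initial timeout.
 */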
13554 void bnxt_fw_reset(struct bnxt *bp)
13555 {
13556 bnxt_ulp_stop(bp);
13557 bnxt_rtnl_lock_sp(bp);
13558 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13559 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13560 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13561 int n = 0, tmo;
13562
13563 if (ptp) {
13564 spin_lock_bh(&ptp->ptp_lock);
13565 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13566 spin_unlock_bh(&ptp->ptp_lock);
13567 } else {
13568 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13569 }
13570 if (bp->pf.active_vfs &&
13571 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13572 n = bnxt_get_registered_vfs(bp);
13573 if (n < 0) {
13574 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13575 n);
13576 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13577 dev_close(bp->dev);
13578 goto fw_reset_exit;
13579 } else if (n > 0) {
13580 u16 vf_tmo_dsecs = n * 10;
13581
13582 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13583 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13584 bp->fw_reset_state =
13585 BNXT_FW_RESET_STATE_POLL_VF;
13586 bnxt_queue_fw_reset_work(bp, HZ / 10);
13587 goto fw_reset_exit;
13588 }
13589 bnxt_fw_reset_close(bp);
13590 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13591 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13592 tmo = HZ / 10;
13593 } else {
13594 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13595 tmo = bp->fw_reset_min_dsecs * HZ / 10;
13596 }
13597 bnxt_queue_fw_reset_work(bp, tmo);
13598 }
13599 fw_reset_exit:
13600 bnxt_rtnl_unlock_sp(bp);
13601 }
13602
13603 static void bnxt_chk_missed_irq(struct bnxt *bp)
13604 {
13605 int i;
13606
13607 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13608 return;
13609
13610 for (i = 0; i < bp->cp_nr_rings; i++) {
13611 struct bnxt_napi *bnapi = bp->bnapi[i];
13612 struct bnxt_cp_ring_info *cpr;
13613 u32 fw_ring_id;
13614 int j;
13615
13616 if (!bnapi)
13617 continue;
13618
13619 cpr = &bnapi->cp_ring;
13620 for (j = 0; j < cpr->cp_ring_count; j++) {
13621 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
13622 u32 val[2];
13623
13624 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13625 continue;
13626
13627 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
13628 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
13629 continue;
13630 }
13631 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
13632 bnxt_dbg_hwrm_ring_info_get(bp,
13633 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
13634 fw_ring_id, &val[0], &val[1]);
13635 cpr->sw_stats->cmn.missed_irqs++;
13636 }
13637 }
13638 }
13639
13640 static void bnxt_cfg_ntp_filters(struct bnxt *);
13641
13642 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
13643 {
13644 struct bnxt_link_info *link_info = &bp->link_info;
13645
13646 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
13647 link_info->autoneg = BNXT_AUTONEG_SPEED;
13648 if (bp->hwrm_spec_code >= 0x10201) {
13649 if (link_info->auto_pause_setting &
13650 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
13651 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13652 } else {
13653 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13654 }
13655 bnxt_set_auto_speed(link_info);
13656 } else {
13657 bnxt_set_force_speed(link_info);
13658 link_info->req_duplex = link_info->duplex_setting;
13659 }
13660 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
13661 link_info->req_flow_ctrl =
13662 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
13663 else
13664 link_info->req_flow_ctrl = link_info->force_pause_setting;
13665 }
13666
13667 static void bnxt_fw_echo_reply(struct bnxt *bp)
13668 {
13669 struct bnxt_fw_health *fw_health = bp->fw_health;
13670 struct hwrm_func_echo_response_input *req;
13671 int rc;
13672
13673 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13674 if (rc)
13675 return;
13676 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
13677 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
13678 hwrm_req_send(bp, req);
13679 }
13680
13681 static void bnxt_ulp_restart(struct bnxt *bp)
13682 {
13683 bnxt_ulp_stop(bp);
13684 bnxt_ulp_start(bp, 0);
13685 }
13686
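/* Deferred slow-path worker: every BNXT_*_SP_EVENT bit set via
 * bnxt_queue_sp_work() is tested and cleared here in process context,
 * so the handlers are free to sleep (HWRM calls, rtnl_lock, mutexes).
 */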
13687 static void bnxt_sp_task(struct work_struct *work)
13688 {
13689 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13690
13691 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13692 smp_mb__after_atomic();
13693 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13694 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13695 return;
13696 }
13697
13698 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
13699 bnxt_ulp_restart(bp);
13700 bnxt_reenable_sriov(bp);
13701 }
13702
13703 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13704 bnxt_cfg_rx_mode(bp);
13705
13706 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13707 bnxt_cfg_ntp_filters(bp);
13708 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13709 bnxt_hwrm_exec_fwd_req(bp);
13710 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13711 netdev_info(bp->dev, "Receive PF driver unload event!\n");
13712 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13713 bnxt_hwrm_port_qstats(bp, 0);
13714 bnxt_hwrm_port_qstats_ext(bp, 0);
13715 bnxt_accumulate_all_stats(bp);
13716 }
13717
13718 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13719 int rc;
13720
13721 mutex_lock(&bp->link_lock);
13722 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
13723 &bp->sp_event))
13724 bnxt_hwrm_phy_qcaps(bp);
13725
13726 rc = bnxt_update_link(bp, true);
13727 if (rc)
13728 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13729 rc);
13730
13731 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
13732 &bp->sp_event))
13733 bnxt_init_ethtool_link_settings(bp);
13734 mutex_unlock(&bp->link_lock);
13735 }
13736 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13737 int rc;
13738
13739 mutex_lock(&bp->link_lock);
13740 rc = bnxt_update_phy_setting(bp);
13741 mutex_unlock(&bp->link_lock);
13742 if (rc) {
13743 netdev_warn(bp->dev, "update phy settings retry failed\n");
13744 } else {
13745 bp->link_info.phy_retry = false;
13746 netdev_info(bp->dev, "update phy settings retry succeeded\n");
13747 }
13748 }
13749 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13750 mutex_lock(&bp->link_lock);
13751 bnxt_get_port_module_status(bp);
13752 mutex_unlock(&bp->link_lock);
13753 }
13754
13755 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13756 bnxt_tc_flow_stats_work(bp);
13757
13758 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13759 bnxt_chk_missed_irq(bp);
13760
13761 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13762 bnxt_fw_echo_reply(bp);
13763
13764 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13765 bnxt_hwmon_notify_event(bp);
13766
13767 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
13768 * must be the last functions called before exiting.
13769 */
13770 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13771 bnxt_reset(bp, false);
13772
13773 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13774 bnxt_reset(bp, true);
13775
13776 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13777 bnxt_rx_ring_reset(bp);
13778
13779 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13780 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13781 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13782 bnxt_devlink_health_fw_report(bp);
13783 else
13784 bnxt_fw_reset(bp);
13785 }
13786
13787 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13788 if (!is_bnxt_fw_ok(bp))
13789 bnxt_devlink_health_fw_report(bp);
13790 }
13791
13792 smp_mb__before_atomic();
13793 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13794 }
13795
13796 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13797 int *max_cp);
13798
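/* Worked example of the ring accounting below (illustrative numbers):
 * tx = 8, rx = 8, tcs = 0, tx_xdp = 0, sh = true, aggregation rings on:
 * hwr.rx = 16 (two slots per RX ring), hwr.tx = 8, and with shared
 * completion rings hwr.cp = max(tx_cp, rx) = 8.
 */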
13799 /* Under rtnl_lock */
13800 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13801 int tx_xdp)
13802 {
13803 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
13804 struct bnxt_hw_rings hwr = {0};
13805 int rx_rings = rx;
13806 int rc;
13807
13808 if (tcs)
13809 tx_sets = tcs;
13810
13811 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13812
13813 if (max_rx < rx_rings)
13814 return -ENOMEM;
13815
13816 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13817 rx_rings <<= 1;
13818
13819 hwr.rx = rx_rings;
13820 hwr.tx = tx * tx_sets + tx_xdp;
13821 if (max_tx < hwr.tx)
13822 return -ENOMEM;
13823
13824 hwr.vnic = bnxt_get_total_vnics(bp, rx);
13825
13826 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13827 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
13828 if (max_cp < hwr.cp)
13829 return -ENOMEM;
13830 hwr.stat = hwr.cp;
13831 if (BNXT_NEW_RM(bp)) {
13832 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
13833 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
13834 hwr.grp = rx;
13835 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13836 }
13837 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
13838 hwr.cp_p5 = hwr.tx + rx;
13839 rc = bnxt_hwrm_check_rings(bp, &hwr);
13840 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
13841 if (!bnxt_ulp_registered(bp->edev)) {
13842 hwr.cp += bnxt_get_ulp_msix_num(bp);
13843 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
13844 }
13845 if (hwr.cp > bp->total_irqs) {
13846 int total_msix = bnxt_change_msix(bp, hwr.cp);
13847
13848 if (total_msix < hwr.cp) {
13849 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
13850 hwr.cp, total_msix);
13851 rc = -ENOSPC;
13852 }
13853 }
13854 }
13855 return rc;
13856 }
13857
13858 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13859 {
13860 if (bp->bar2) {
13861 pci_iounmap(pdev, bp->bar2);
13862 bp->bar2 = NULL;
13863 }
13864
13865 if (bp->bar1) {
13866 pci_iounmap(pdev, bp->bar1);
13867 bp->bar1 = NULL;
13868 }
13869
13870 if (bp->bar0) {
13871 pci_iounmap(pdev, bp->bar0);
13872 bp->bar0 = NULL;
13873 }
13874 }
13875
13876 static void bnxt_cleanup_pci(struct bnxt *bp)
13877 {
13878 bnxt_unmap_bars(bp, bp->pdev);
13879 pci_release_regions(bp->pdev);
13880 if (pci_is_enabled(bp->pdev))
13881 pci_disable_device(bp->pdev);
13882 }
13883
13884 static void bnxt_init_dflt_coal(struct bnxt *bp)
13885 {
13886 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13887 struct bnxt_coal *coal;
13888 u16 flags = 0;
13889
13890 if (coal_cap->cmpl_params &
13891 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
13892 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
13893
13894 /* Tick values in microseconds.
13895 * 1 coal_buf x bufs_per_record = 1 completion record.
13896 */
13897 coal = &bp->rx_coal;
13898 coal->coal_ticks = 10;
13899 coal->coal_bufs = 30;
13900 coal->coal_ticks_irq = 1;
13901 coal->coal_bufs_irq = 2;
13902 coal->idle_thresh = 50;
13903 coal->bufs_per_record = 2;
13904 coal->budget = 64; /* NAPI budget */
13905 coal->flags = flags;
13906
13907 coal = &bp->tx_coal;
13908 coal->coal_ticks = 28;
13909 coal->coal_bufs = 30;
13910 coal->coal_ticks_irq = 2;
13911 coal->coal_bufs_irq = 2;
13912 coal->bufs_per_record = 1;
13913 coal->flags = flags;
13914
13915 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
13916 }
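
/* A mapping sketch (see bnxt_ethtool.c for the authoritative conversion):
 * with bufs_per_record = 2, the RX defaults above surface through
 * "ethtool -c" as roughly rx-usecs 10 / rx-frames 15.
 */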
13917
13918 /* FW that pre-reserves 1 VNIC per function */
13919 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13920 {
13921 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13922
13923 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13924 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
13925 return true;
13926 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13927 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
13928 return true;
13929 return false;
13930 }
13931
13932 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13933 {
13934 int rc;
13935
13936 bp->fw_cap = 0;
13937 rc = bnxt_hwrm_ver_get(bp);
13938 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
13939 * so wait before continuing with recovery.
13940 */
13941 if (rc)
13942 msleep(100);
13943 bnxt_try_map_fw_health_reg(bp);
13944 if (rc) {
13945 rc = bnxt_try_recover_fw(bp);
13946 if (rc)
13947 return rc;
13948 rc = bnxt_hwrm_ver_get(bp);
13949 if (rc)
13950 return rc;
13951 }
13952
13953 bnxt_nvm_cfg_ver_get(bp);
13954
13955 rc = bnxt_hwrm_func_reset(bp);
13956 if (rc)
13957 return -ENODEV;
13958
13959 bnxt_hwrm_fw_set_time(bp);
13960 return 0;
13961 }
13962
13963 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13964 {
13965 int rc;
13966
13967 /* Get the MAX capabilities for this function */
13968 rc = bnxt_hwrm_func_qcaps(bp);
13969 if (rc) {
13970 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13971 rc);
13972 return -ENODEV;
13973 }
13974
13975 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13976 if (rc)
13977 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13978 rc);
13979
13980 if (bnxt_alloc_fw_health(bp)) {
13981 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13982 } else {
13983 rc = bnxt_hwrm_error_recovery_qcfg(bp);
13984 if (rc)
13985 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13986 rc);
13987 }
13988
13989 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13990 if (rc)
13991 return -ENODEV;
13992
13993 rc = bnxt_alloc_crash_dump_mem(bp);
13994 if (rc)
13995 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
13996 rc);
13997 if (!rc) {
13998 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
13999 if (rc) {
14000 bnxt_free_crash_dump_mem(bp);
14001 netdev_warn(bp->dev,
14002 "hwrm crash dump mem failure rc: %d\n", rc);
14003 }
14004 }
14005
14006 if (bnxt_fw_pre_resv_vnics(bp))
14007 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14008
14009 bnxt_hwrm_func_qcfg(bp);
14010 bnxt_hwrm_vnic_qcaps(bp);
14011 bnxt_hwrm_port_led_qcaps(bp);
14012 bnxt_ethtool_init(bp);
14013 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14014 __bnxt_hwrm_ptp_qcfg(bp);
14015 bnxt_dcb_init(bp);
14016 bnxt_hwmon_init(bp);
14017 return 0;
14018 }
14019
14020 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14021 {
14022 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14023 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14024 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14025 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14026 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14027 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14028 bp->rss_hash_delta = bp->rss_hash_cfg;
14029 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14030 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14031 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14032 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14033 }
14034 }
14035
14036 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14037 {
14038 struct net_device *dev = bp->dev;
14039
14040 dev->hw_features &= ~NETIF_F_NTUPLE;
14041 dev->features &= ~NETIF_F_NTUPLE;
14042 bp->flags &= ~BNXT_FLAG_RFS;
14043 if (bnxt_rfs_supported(bp)) {
14044 dev->hw_features |= NETIF_F_NTUPLE;
14045 if (bnxt_rfs_capable(bp, false)) {
14046 bp->flags |= BNXT_FLAG_RFS;
14047 dev->features |= NETIF_F_NTUPLE;
14048 }
14049 }
14050 }
14051
14052 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14053 {
14054 struct pci_dev *pdev = bp->pdev;
14055
14056 bnxt_set_dflt_rss_hash_type(bp);
14057 bnxt_set_dflt_rfs(bp);
14058
14059 bnxt_get_wol_settings(bp);
14060 if (bp->flags & BNXT_FLAG_WOL_CAP)
14061 device_set_wakeup_enable(&pdev->dev, bp->wol);
14062 else
14063 device_set_wakeup_capable(&pdev->dev, false);
14064
14065 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14066 bnxt_hwrm_coal_params_qcaps(bp);
14067 }
14068
14069 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14070
14071 int bnxt_fw_init_one(struct bnxt *bp)
14072 {
14073 int rc;
14074
14075 rc = bnxt_fw_init_one_p1(bp);
14076 if (rc) {
14077 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14078 return rc;
14079 }
14080 rc = bnxt_fw_init_one_p2(bp);
14081 if (rc) {
14082 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14083 return rc;
14084 }
14085 rc = bnxt_probe_phy(bp, false);
14086 if (rc)
14087 return rc;
14088 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14089 if (rc)
14090 return rc;
14091
14092 bnxt_fw_init_one_p3(bp);
14093 return 0;
14094 }
14095
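/* Execute one step of the firmware-supplied reset sequence.  Each entry
 * encodes a register type, offset, value and an optional post-write
 * delay.  GRC registers are written through a window remapped to offset
 * 0x2000 of BAR0; the config-space read before the delay presumably
 * serves to flush the posted write before waiting.
 */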
static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		writel(reg_off & BNXT_GRC_BASE_MASK,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel(val, bp->bar0 + reg_off);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel(val, bp->bar1 + reg_off);
		break;
	}
	if (delay_msecs) {
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}

bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	bool result = true; /* firmware will enforce if unknown */

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		return result;

	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
		return result;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	if (!hwrm_req_send(bp, req))
		result = !!(le16_to_cpu(resp->flags) &
			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
	hwrm_req_drop(bp, req);
	return result;
}

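/* Reset the firmware using whichever mechanism the chip supports, in
 * order of preference: an OP-TEE based reset, a host-driven register
 * write sequence, or an HWRM_FW_RESET request to the co-processor.
 */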
static void bnxt_reset_all(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bnxt_fw_reset_via_optee(bp);
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input *req;

		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
		if (!rc) {
			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
			rc = hwrm_req_send(bp, req);
		}
		if (rc != -ENODEV)
			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}

static bool bnxt_fw_reset_timeout(struct bnxt *bp)
{
	return time_after(jiffies, bp->fw_reset_timestamp +
			  (bp->fw_reset_max_dsecs * HZ / 10));
}

static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
		bnxt_dl_health_fw_status_update(bp, false);
	bp->fw_reset_state = 0;
	dev_close(bp->dev);
}

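/* Delayed-work handler that drives the firmware reset state machine:
 * POLL_VF (wait for VFs to unregister) -> POLL_FW_DOWN (wait for the
 * old firmware to shut down) -> RESET_FW (primary function only) ->
 * ENABLE_DEV (re-enable the PCI device) -> POLL_FW (wait for the new
 * firmware to respond) -> OPENING (reopen the NIC and restart ULPs).
 * The work re-queues itself with an appropriate delay between states.
 */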
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_VF: {
		int n = bnxt_get_registered_vfs(bp);
		int tmo;

		if (n < 0) {
			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
				   n, jiffies_to_msecs(jiffies -
				   bp->fw_reset_timestamp));
			goto fw_reset_abort;
		} else if (n > 0) {
			if (bnxt_fw_reset_timeout(bp)) {
				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
				bp->fw_reset_state = 0;
				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
					   n);
				goto ulp_start;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 10);
			return;
		}
		bp->fw_reset_timestamp = jiffies;
		rtnl_lock();
		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			goto ulp_start;
		}
		bnxt_fw_reset_close(bp);
		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ / 10;
		}
		rtnl_unlock();
		bnxt_queue_fw_reset_work(bp, tmo);
		return;
	}
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
			bnxt_dl_remote_reload(bp);
		if (pci_enable_device(bp->pdev)) {
			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
			rc = -ENODEV;
			goto fw_reset_abort;
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				netdev_err(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		while (!rtnl_trylock()) {
			bnxt_queue_fw_reset_work(bp, HZ / 10);
			return;
		}
		rc = bnxt_open(bp->dev);
		if (rc) {
			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			goto ulp_start;
		}

		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Make sure fw_reset_state is 0 before clearing the flag */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ptp_reapply_pps(bp);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
			bnxt_dl_health_fw_recovery_done(bp);
			bnxt_dl_health_fw_status_update(bp, true);
		}
		rtnl_unlock();
		bnxt_ulp_start(bp, 0);
		bnxt_reenable_sriov(bp);
		rtnl_lock();
		bnxt_vf_reps_alloc(bp);
		bnxt_vf_reps_open(bp);
		rtnl_unlock();
		break;
	}
	return;

fw_reset_abort_status:
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
ulp_start:
	bnxt_ulp_start(bp, rc);
}

static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_release;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
	 * determines the BAR size.
	 */
	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	INIT_WORK(&bp->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);

	spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
	spin_lock_init(&bp->db_lock);
#endif

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	timer_setup(&bp->timer, bnxt_timer, 0);
	bp->current_interval = BNXT_TIMER_INTERVAL;

	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	return 0;

init_err_release:
	bnxt_unmap_bars(bp, pdev);
	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	rc = bnxt_approve_mac(bp, addr->sa_data, true);
	if (rc)
		return rc;

	eth_hw_addr_set(dev, addr->sa_data);
	bnxt_clear_usr_fltrs(bp, true);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	WRITE_ONCE(dev->mtu, new_mtu);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

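/* Configure the number of TX traffic classes (mqprio).  The device is
 * closed and reopened so that the TX and completion rings can be
 * re-allocated to match the new TC count.
 */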
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
	int rc, tx_cp;

	if (tc > bp->max_tc) {
		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (bp->num_tc == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      sh, tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
		bp->num_tc = tc;
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
		bp->num_tc = 0;
	}
	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct bnxt *bp = cb_priv;

	if (!bnxt_tc_flower_enabled(bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

LIST_HEAD(bnxt_block_cb_list);

static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &bnxt_block_cb_list,
						  bnxt_setup_tc_block_cb,
						  bp, bp, true);
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_qopt *mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
	}
	default:
		return -EOPNOTSUPP;
	}
}

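/* Compute the ntuple filter hash-table index: the skb's flow hash when
 * an skb is available, otherwise a Toeplitz hash over the flow keys
 * using the default VNIC's RSS hash key.
 */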
u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
			    const struct sk_buff *skb)
{
	struct bnxt_vnic_info *vnic;

	if (skb)
		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;

	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}

int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
			   u32 idx)
{
	struct hlist_head *head;
	int bit_id;

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return -ENOMEM;
	}

	fltr->base.sw_id = (u16)bit_id;
	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
	fltr->base.flags |= BNXT_ACT_RING_DST;
	head = &bp->ntp_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
	bnxt_insert_usr_fltr(bp, &fltr->base);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);
	return 0;
}

static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct bnxt_flow_masks *masks1 = &f1->fmasks;
	struct bnxt_flow_masks *masks2 = &f2->fmasks;
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->basic.n_proto != keys2->basic.n_proto ||
	    keys1->basic.ip_proto != keys2->basic.ip_proto)
		return false;

	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
			return false;
	} else {
		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
				     &keys2->addrs.v6addrs.src) ||
		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
				     &masks2->addrs.v6addrs.src) ||
		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
				     &keys2->addrs.v6addrs.dst) ||
		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
				     &masks2->addrs.v6addrs.dst))
			return false;
	}

	return keys1->ports.src == keys2->ports.src &&
	       masks1->ports.src == masks2->ports.src &&
	       keys1->ports.dst == keys2->ports.dst &&
	       masks1->ports.dst == masks2->ports.dst &&
	       keys1->control.flags == keys2->control.flags &&
	       f1->l2_fltr == f2->l2_fltr;
}

struct bnxt_ntuple_filter *
bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
				struct bnxt_ntuple_filter *fltr, u32 idx)
{
	struct bnxt_ntuple_filter *f;
	struct hlist_head *head;

	head = &bp->ntp_fltr_hash_tbl[idx];
	hlist_for_each_entry_rcu(f, head, base.hash) {
		if (bnxt_fltr_match(f, fltr))
			return f;
	}
	return NULL;
}

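/* ndo_rx_flow_steer() handler for aRFS.  Looks up (or takes a reference
 * on) the matching L2 filter, dissects the flow keys from the skb, and
 * inserts a new ntuple filter unless an identical one already exists.
 * Programming of the HW filter is deferred by queueing
 * BNXT_RX_NTP_FLTR_SP_EVENT work.
 */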
#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	struct bnxt_l2_filter *l2_fltr;
	int rc = 0, idx;
	u32 flags;

	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
		atomic_inc(&l2_fltr->refcnt);
	} else {
		struct bnxt_l2_key key;

		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
		key.vlan = 0;
		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
		if (!l2_fltr)
			return -EINVAL;
		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
			bnxt_del_l2_filter(bp, l2_fltr);
			return -EINVAL;
		}
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr) {
		bnxt_del_l2_filter(bp, l2_fltr);
		return -ENOMEM;
	}

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
		if (bp->hwrm_spec_code < 0x10601) {
			rc = -EPROTONOSUPPORT;
			goto err_free;
		}
		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
	}
	flags = fkeys->control.flags;
	if (((flags & FLOW_DIS_ENCAPSULATION) &&
	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	new_fltr->l2_fltr = l2_fltr;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rc = fltr->base.sw_id;
		rcu_read_unlock();
		goto err_free;
	}
	rcu_read_unlock();

	new_fltr->flow_id = flow_id;
	new_fltr->base.rxq = rxq_index;
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
		return new_fltr->base.sw_id;
	}

err_free:
	bnxt_del_l2_filter(bp, l2_fltr);
	kfree(new_fltr);
	return rc;
}
#endif

void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
{
	spin_lock_bh(&bp->ntp_fltr_lock);
	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return;
	}
	hlist_del_rcu(&fltr->base.hash);
	bnxt_del_one_usr_fltr(bp, &fltr->base);
	bp->ntp_fltr_count--;
	spin_unlock_bh(&bp->ntp_fltr_lock);
	bnxt_del_l2_filter(bp, fltr->l2_fltr);
	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
	kfree_rcu(fltr, base.rcu);
}

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
				if (fltr->base.flags & BNXT_ACT_NO_AGING)
					continue;
				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
							fltr->flow_id,
							fltr->base.sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
			}

			if (del)
				bnxt_del_ntp_filter(bp, fltr);
		}
	}
#endif
}

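/* udp_tunnel_nic callbacks: translate the tunnel type to the matching
 * HWRM tunnel-type code and ask firmware to allocate or free the UDP
 * destination port.  P7 chips additionally support VXLAN-GPE.
 */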
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
				    unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
	else
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;

	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}

static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
				      unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}

static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.set_port	= bnxt_udp_tunnel_set_port,
	.unset_port	= bnxt_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, bnxt_udp_tunnels_p7 = {
	.set_port	= bnxt_udp_tunnel_set_port,
	.unset_port	= bnxt_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
	},
};

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		u16 mode;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_eth_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_features_check	= bnxt_features_check,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_bpf		= bnxt_xdp,
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
};

static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
				    struct netdev_queue_stats_rx *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_cp_ring_info *cpr;
	u64 *sw;

	cpr = &bp->bnapi[i]->cp_ring;
	sw = cpr->stats.sw_stats;

	stats->packets = 0;
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);

	stats->bytes = 0;
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);

	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
}

static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
				    struct netdev_queue_stats_tx *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_napi *bnapi;
	u64 *sw;

	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
	sw = bnapi->cp_ring.stats.sw_stats;

	stats->packets = 0;
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);

	stats->bytes = 0;
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
}

static void bnxt_get_base_stats(struct net_device *dev,
				struct netdev_queue_stats_rx *rx,
				struct netdev_queue_stats_tx *tx)
{
	struct bnxt *bp = netdev_priv(dev);

	rx->packets = bp->net_stats_prev.rx_packets;
	rx->bytes = bp->net_stats_prev.rx_bytes;
	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;

	tx->packets = bp->net_stats_prev.tx_packets;
	tx->bytes = bp->net_stats_prev.tx_bytes;
}

static const struct netdev_stat_ops bnxt_stat_ops = {
	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
	.get_base_stats		= bnxt_get_base_stats,
};

static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	u16 mem_size;

	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
	mem_size = rxr->rx_agg_bmap_size / 8;
	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
	if (!rxr->rx_agg_bmap)
		return -ENOMEM;

	return 0;
}

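/* Allocate the memory for one RX queue into 'qmem' without touching the
 * live ring: clone the ring state, then allocate a fresh page pool, XDP
 * rxq info, ring descriptors and (if aggregation rings are used) the
 * aggregation ring and its bitmap, and pre-fill the rings with buffers.
 * Part of the netdev queue management (queue restart) API.
 */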
static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt_rx_ring_info *rxr, *clone;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ring_struct *ring;
	int rc;

	rxr = &bp->rx_ring[idx];
	clone = qmem;
	memcpy(clone, rxr, sizeof(*rxr));
	bnxt_init_rx_ring_struct(bp, clone);
	bnxt_reset_rx_ring_struct(bp, clone);

	clone->rx_prod = 0;
	clone->rx_agg_prod = 0;
	clone->rx_sw_agg_prod = 0;
	clone->rx_next_cons = 0;

	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
	if (rc)
		return rc;

	rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
	if (rc < 0)
		goto err_page_pool_destroy;

	rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
					MEM_TYPE_PAGE_POOL,
					clone->page_pool);
	if (rc)
		goto err_rxq_info_unreg;

	ring = &clone->rx_ring_struct;
	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
	if (rc)
		goto err_free_rx_ring;

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ring = &clone->rx_agg_ring_struct;
		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			goto err_free_rx_agg_ring;

		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
		if (rc)
			goto err_free_rx_agg_ring;
	}

	bnxt_init_one_rx_ring_rxbd(bp, clone);
	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);

	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		bnxt_alloc_one_rx_ring_page(bp, clone, idx);

	return 0;

err_free_rx_agg_ring:
	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
err_rxq_info_unreg:
	xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
	clone->page_pool->p.napi = NULL;
	page_pool_destroy(clone->page_pool);
	clone->page_pool = NULL;
	return rc;
}

static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
{
	struct bnxt_rx_ring_info *rxr = qmem;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ring_struct *ring;

	bnxt_free_one_rx_ring(bp, rxr);
	bnxt_free_one_rx_agg_ring(bp, rxr);

	xdp_rxq_info_unreg(&rxr->xdp_rxq);

	page_pool_destroy(rxr->page_pool);
	rxr->page_pool = NULL;

	ring = &rxr->rx_ring_struct;
	bnxt_free_ring(bp, &ring->ring_mem);

	ring = &rxr->rx_agg_ring_struct;
	bnxt_free_ring(bp, &ring->ring_mem);

	kfree(rxr->rx_agg_bmap);
	rxr->rx_agg_bmap = NULL;
}

static void bnxt_copy_rx_ring(struct bnxt *bp,
			      struct bnxt_rx_ring_info *dst,
			      struct bnxt_rx_ring_info *src)
{
	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
	struct bnxt_ring_struct *dst_ring, *src_ring;
	int i;

	dst_ring = &dst->rx_ring_struct;
	dst_rmem = &dst_ring->ring_mem;
	src_ring = &src->rx_ring_struct;
	src_rmem = &src_ring->ring_mem;

	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
	WARN_ON(dst_rmem->flags != src_rmem->flags);
	WARN_ON(dst_rmem->depth != src_rmem->depth);
	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);

	dst_rmem->pg_tbl = src_rmem->pg_tbl;
	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
	*dst_rmem->vmem = *src_rmem->vmem;
	for (i = 0; i < dst_rmem->nr_pages; i++) {
		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
	}

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return;

	dst_ring = &dst->rx_agg_ring_struct;
	dst_rmem = &dst_ring->ring_mem;
	src_ring = &src->rx_agg_ring_struct;
	src_rmem = &src_ring->ring_mem;

	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
	WARN_ON(dst_rmem->flags != src_rmem->flags);
	WARN_ON(dst_rmem->depth != src_rmem->depth);
	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);

	dst_rmem->pg_tbl = src_rmem->pg_tbl;
	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
	*dst_rmem->vmem = *src_rmem->vmem;
	for (i = 0; i < dst_rmem->nr_pages; i++) {
		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
	}

	dst->rx_agg_bmap = src->rx_agg_bmap;
}

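/* Swap the freshly allocated ring state from 'qmem' into the live RX
 * ring, re-create the RX and aggregation rings in firmware, ring the
 * doorbells, and restore the VNIC MRU so that traffic can flow to the
 * restarted queue again.
 */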
static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rx_ring_info *rxr, *clone;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_vnic_info *vnic;
	int i, rc;

	rxr = &bp->rx_ring[idx];
	clone = qmem;

	rxr->rx_prod = clone->rx_prod;
	rxr->rx_agg_prod = clone->rx_agg_prod;
	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
	rxr->rx_next_cons = clone->rx_next_cons;
	rxr->page_pool = clone->page_pool;
	rxr->xdp_rxq = clone->xdp_rxq;

	bnxt_copy_rx_ring(bp, rxr, clone);

	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
	if (rc)
		return rc;
	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
	if (rc)
		goto err_free_hwrm_rx_ring;

	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	cpr = &rxr->bnapi->cp_ring;
	cpr->sw_stats->rx.rx_resets++;

	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
		vnic = &bp->vnic_info[i];
		vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
		bnxt_hwrm_vnic_update(bp, vnic,
				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
	}

	return 0;

err_free_hwrm_rx_ring:
	bnxt_hwrm_rx_ring_free(bp, rxr, false);
	return rc;
}

static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_vnic_info *vnic;
	int i;

	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
		vnic = &bp->vnic_info[i];
		vnic->mru = 0;
		bnxt_hwrm_vnic_update(bp, vnic,
				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
	}

	rxr = &bp->rx_ring[idx];
	bnxt_hwrm_rx_ring_free(bp, rxr, false);
	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
	rxr->rx_next_cons = 0;
	page_pool_disable_direct_recycling(rxr->page_pool);

	memcpy(qmem, rxr, sizeof(*rxr));
	bnxt_init_rx_ring_struct(bp, qmem);

	return 0;
}

static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
	.ndo_queue_mem_free	= bnxt_queue_mem_free,
	.ndo_queue_start	= bnxt_queue_start,
	.ndo_queue_stop		= bnxt_queue_stop,
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	bnxt_rdma_aux_device_del(bp);

	bnxt_ptp_clear(bp);
	unregister_netdev(dev);

	bnxt_rdma_aux_device_uninit(bp);

	bnxt_free_l2_filters(bp, true);
	bnxt_free_ntp_fltrs(bp, true);
	WARN_ON(bp->num_rss_ctx);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	bnxt_free_crash_dump_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	bp->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
	else
		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
	if (!fw_dflt)
		return 0;

	mutex_lock(&bp->link_lock);
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	mutex_unlock(&bp->link_lock);
	return 0;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num_in_use(bp),
			hw_resc->max_stat_ctxs -
			bnxt_get_ulp_stat_ctxs_in_use(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		int rc;

		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		if (rc) {
			*max_rx = 0;
			*max_tx = 0;
		}
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

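/* Choose the default ring counts: start from the default RSS queue
 * count (1 in a kdump kernel), cap it by the CPU count per port and by
 * the available HW resources, set aside MSI-X vectors for RoCE if
 * possible, then reserve the rings with firmware (a second time if the
 * first reservation trimmed the counts).
 */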
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	int avail_msix;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);

		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
		bnxt_set_dflt_ulp_stat_ctxs(bp);
	}

	rc = __bnxt_reserve_rings(bp);
	if (rc && rc != -ENODEV)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc && rc != -ENODEV)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_set_dflt_rfs(bp);

init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(bp->dev, vf->mac_addr);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	unsigned int vpd_size, kw_len;
	int pos, size;
	u8 *vpd_data;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
		return;
	}

	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (pos < 0)
		goto read_sn;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
	if (pos < 0)
		goto exit;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

void bnxt_print_device_info(struct bnxt *bp)
{
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	pcie_print_link_status(bp->pdev);
}

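/* PCI probe entry point.  Allocates the netdev, initializes the board,
 * runs firmware init phases 1 and 2, maps the doorbell BAR, sets up
 * netdev features and default rings, and registers the netdev along
 * with devlink, TC flower offload and the RDMA auxiliary device.
 */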
bnxt_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)15692 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
15693 {
15694 struct bnxt_hw_resc *hw_resc;
15695 struct net_device *dev;
15696 struct bnxt *bp;
15697 int rc, max_irqs;
15698
15699 if (pci_is_bridge(pdev))
15700 return -ENODEV;
15701
15702 if (!pdev->msix_cap) {
15703 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
15704 return -ENODEV;
15705 }
15706
15707 /* Clear any pending DMA transactions from crash kernel
15708 * while loading driver in capture kernel.
15709 */
15710 if (is_kdump_kernel()) {
15711 pci_clear_master(pdev);
15712 pcie_flr(pdev);
15713 }
15714
15715 max_irqs = bnxt_get_max_irq(pdev);
15716 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
15717 max_irqs);
15718 if (!dev)
15719 return -ENOMEM;
15720
15721 bp = netdev_priv(dev);
15722 bp->board_idx = ent->driver_data;
15723 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
15724 bnxt_set_max_func_irqs(bp, max_irqs);
15725
15726 if (bnxt_vf_pciid(bp->board_idx))
15727 bp->flags |= BNXT_FLAG_VF;
15728
15729 /* No devlink port registration in case of a VF */
15730 if (BNXT_PF(bp))
15731 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
15732
15733 rc = bnxt_init_board(pdev, dev);
15734 if (rc < 0)
15735 goto init_err_free;
15736
15737 dev->netdev_ops = &bnxt_netdev_ops;
15738 dev->stat_ops = &bnxt_stat_ops;
15739 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
15740 dev->ethtool_ops = &bnxt_ethtool_ops;
15741 pci_set_drvdata(pdev, dev);
15742
15743 rc = bnxt_alloc_hwrm_resources(bp);
15744 if (rc)
15745 goto init_err_pci_clean;
15746
15747 mutex_init(&bp->hwrm_cmd_lock);
15748 mutex_init(&bp->link_lock);
15749
15750 rc = bnxt_fw_init_one_p1(bp);
15751 if (rc)
15752 goto init_err_pci_clean;
15753
15754 if (BNXT_PF(bp))
15755 bnxt_vpd_read_info(bp);
15756
15757 if (BNXT_CHIP_P5_PLUS(bp)) {
15758 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
15759 if (BNXT_CHIP_P7(bp))
15760 bp->flags |= BNXT_FLAG_CHIP_P7;
15761 }
15762
15763 rc = bnxt_alloc_rss_indir_tbl(bp);
15764 if (rc)
15765 goto init_err_pci_clean;
15766
15767 rc = bnxt_fw_init_one_p2(bp);
15768 if (rc)
15769 goto init_err_pci_clean;
15770
15771 rc = bnxt_map_db_bar(bp);
15772 if (rc) {
15773 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15774 rc);
15775 goto init_err_pci_clean;
15776 }
15777
15778 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15779 NETIF_F_TSO | NETIF_F_TSO6 |
15780 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15781 NETIF_F_GSO_IPXIP4 |
15782 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15783 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
15784 NETIF_F_RXCSUM | NETIF_F_GRO;
15785 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15786 dev->hw_features |= NETIF_F_GSO_UDP_L4;
15787
15788 if (BNXT_SUPPORTS_TPA(bp))
15789 dev->hw_features |= NETIF_F_LRO;
15790
15791 dev->hw_enc_features =
15792 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15793 NETIF_F_TSO | NETIF_F_TSO6 |
15794 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15795 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15796 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
15797 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15798 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
15799 if (bp->flags & BNXT_FLAG_CHIP_P7)
15800 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
15801 else
15802 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
15803
15804 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
15805 NETIF_F_GSO_GRE_CSUM;
15806 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
15807 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
15808 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
15809 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
15810 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
15811 if (BNXT_SUPPORTS_TPA(bp))
15812 dev->hw_features |= NETIF_F_GRO_HW;
15813 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
	if (bp->tso_max_segs)
		netif_set_tso_max_segs(dev, bp->tso_max_segs);

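	/* Advertise XDP support: the basic verdicts, XDP_REDIRECT, and
	 * multi-buffer (scatter-gather) RX frames.
	 */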
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5_PLUS(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize MAC address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	hw_resc = &bp->hw_resc;
	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
		       BNXT_L2_FLTR_MAX_FLTR;
	/* Older firmware may not report these filters properly */
	if (bp->max_fltr < BNXT_MAX_FLTR)
		bp->max_fltr = BNXT_MAX_FLTR;
	bnxt_init_l2_fltr_tbl(bp);
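	/* Derive the RX buffer layout, TPA (aggregation) flags and ring
	 * sizes from the current MTU and feature set.
	 */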
	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_rdma_aux_device_init(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSI-X, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

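	/* bnxt_pf_wq is a driver-global, ordered workqueue shared by all
	 * PF devices; it is created on the first PF probe and destroyed
	 * only in bnxt_exit().
	 */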
	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	INIT_LIST_HEAD(&bp->usr_fltr_list);

	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
	if (BNXT_SUPPORTS_QUEUE_API(bp))
		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_rdma_aux_device_add(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);

	return 0;
init_err_cleanup:
	bnxt_rdma_aux_device_uninit(bp);
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	bnxt_free_crash_dump_mem(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

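	/* On power-off, arm wake-on-LAN if it is configured and drop the
	 * device into D3hot so it can generate the wakeup event.
	 */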
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

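	/* Stop the auxiliary (RoCE) driver first; it must quiesce before
	 * the netdev is closed and the firmware resources go away.
	 */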
	bnxt_ulp_stop(bp);

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	bnxt_clear_reservations(bp, true);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}
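	/* If crash dump memory was configured before suspend, re-register
	 * it with the freshly reset firmware.
	 */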
	if (bp->fw_crash_mem)
		bnxt_hwrm_crash_dump_mem_cfg(bp);

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	bool abort = false;

	netdev_info(netdev, "PCI I/O error detected\n");

	bnxt_ulp_stop(bp);

	rtnl_lock();
	netif_device_detach(netdev);

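	/* Only one recovery flow may run at a time; if a firmware reset
	 * is already in progress, abort the AER flow and disconnect.
	 */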
	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "Firmware reset already in progress\n");
		abort = true;
	}

	if (abort || state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Link is not reliable anymore if state is pci_channel_io_frozen
	 * so we disable bus master to prevent any potential bad DMAs before
	 * freeing kernel memory.
	 */
	if (state == pci_channel_io_frozen) {
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
		bnxt_fw_fatal_close(bp);
	}

	if (netif_running(netdev))
		__bnxt_close_nic(bp, true, true);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
		msleep(900);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only when the BARs
		 * are rewritten.
		 *
		 * Since pci_restore_state() does not rewrite a BAR whose
		 * value matches the earlier saved value, write the BARs to 0
		 * first to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

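		/* Rebuild the MSI-X state from scratch; the auxiliary (RoCE)
		 * driver's interrupts must be stopped across the transition
		 * and restarted afterwards.
		 */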
		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	if (!err)
		netif_device_attach(netdev);

	rtnl_unlock();
	bnxt_ulp_start(bp, err);
	if (!err)
		bnxt_reenable_sriov(bp);
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);