// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/netdev_queues.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)	((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
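
/* Illustrative example (not part of the driver): hns3_tx_bd_count() gives the
 * number of buffer descriptors a buffer needs. Assuming HNS3_MAX_BD_SIZE is
 * 65535 (as defined in hns3_enet.h), a 100000-byte fragment needs
 * DIV_ROUND_UP(100000, 65535) == 2 descriptors.
 */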

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)
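
/* Illustrative usage (hypothetical call site): the first argument is the
 * netdev that netdev_err() expects, e.g.
 *	hns3_rl_err(netdev, "alloc rx buffer failed: %d\n", ret);
 * so at most one burst of messages per net_ratelimit() window is printed.
 */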

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");

static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);

#define HNS3_SGL_SIZE(nfrag)	(sizeof(struct scatterlist) * (nfrag) +	\
				 sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE	ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
				      dma_get_cache_alignment())

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U
#define HNS3_MIN_TUN_PKT_LEN	65U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
	{	ptype, \
		l, \
		CHECKSUM_##s, \
		HNS3_L3_TYPE_##t, \
		1, \
		h}

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
		  PKT_HASH_TYPE_NONE }
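
/* Expansion sketch (for reference, not compiled separately):
 *	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE)
 * expands to
 *	{ 17, 0, CHECKSUM_COMPLETE, HNS3_L3_TYPE_IPV4, 1, PKT_HASH_TYPE_NONE }
 * i.e. ptype 17 is a valid IPv4 entry with a COMPLETE checksum level and no
 * RSS hash type.
 */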

static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
	HNS3_RX_PTYPE_UNUSED_ENTRY(0),
	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_UNUSED_ENTRY(9),
	HNS3_RX_PTYPE_UNUSED_ENTRY(10),
	HNS3_RX_PTYPE_UNUSED_ENTRY(11),
	HNS3_RX_PTYPE_UNUSED_ENTRY(12),
	HNS3_RX_PTYPE_UNUSED_ENTRY(13),
	HNS3_RX_PTYPE_UNUSED_ENTRY(14),
	HNS3_RX_PTYPE_UNUSED_ENTRY(15),
	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(26),
	HNS3_RX_PTYPE_UNUSED_ENTRY(27),
	HNS3_RX_PTYPE_UNUSED_ENTRY(28),
	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(38),
	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(46),
	HNS3_RX_PTYPE_UNUSED_ENTRY(47),
	HNS3_RX_PTYPE_UNUSED_ENTRY(48),
	HNS3_RX_PTYPE_UNUSED_ENTRY(49),
	HNS3_RX_PTYPE_UNUSED_ENTRY(50),
	HNS3_RX_PTYPE_UNUSED_ENTRY(51),
	HNS3_RX_PTYPE_UNUSED_ENTRY(52),
	HNS3_RX_PTYPE_UNUSED_ENTRY(53),
	HNS3_RX_PTYPE_UNUSED_ENTRY(54),
	HNS3_RX_PTYPE_UNUSED_ENTRY(55),
	HNS3_RX_PTYPE_UNUSED_ENTRY(56),
	HNS3_RX_PTYPE_UNUSED_ENTRY(57),
	HNS3_RX_PTYPE_UNUSED_ENTRY(58),
	HNS3_RX_PTYPE_UNUSED_ENTRY(59),
	HNS3_RX_PTYPE_UNUSED_ENTRY(60),
	HNS3_RX_PTYPE_UNUSED_ENTRY(61),
	HNS3_RX_PTYPE_UNUSED_ENTRY(62),
	HNS3_RX_PTYPE_UNUSED_ENTRY(63),
	HNS3_RX_PTYPE_UNUSED_ENTRY(64),
	HNS3_RX_PTYPE_UNUSED_ENTRY(65),
	HNS3_RX_PTYPE_UNUSED_ENTRY(66),
	HNS3_RX_PTYPE_UNUSED_ENTRY(67),
	HNS3_RX_PTYPE_UNUSED_ENTRY(68),
	HNS3_RX_PTYPE_UNUSED_ENTRY(69),
	HNS3_RX_PTYPE_UNUSED_ENTRY(70),
	HNS3_RX_PTYPE_UNUSED_ENTRY(71),
	HNS3_RX_PTYPE_UNUSED_ENTRY(72),
	HNS3_RX_PTYPE_UNUSED_ENTRY(73),
	HNS3_RX_PTYPE_UNUSED_ENTRY(74),
	HNS3_RX_PTYPE_UNUSED_ENTRY(75),
	HNS3_RX_PTYPE_UNUSED_ENTRY(76),
	HNS3_RX_PTYPE_UNUSED_ENTRY(77),
	HNS3_RX_PTYPE_UNUSED_ENTRY(78),
	HNS3_RX_PTYPE_UNUSED_ENTRY(79),
	HNS3_RX_PTYPE_UNUSED_ENTRY(80),
	HNS3_RX_PTYPE_UNUSED_ENTRY(81),
	HNS3_RX_PTYPE_UNUSED_ENTRY(82),
	HNS3_RX_PTYPE_UNUSED_ENTRY(83),
	HNS3_RX_PTYPE_UNUSED_ENTRY(84),
	HNS3_RX_PTYPE_UNUSED_ENTRY(85),
	HNS3_RX_PTYPE_UNUSED_ENTRY(86),
	HNS3_RX_PTYPE_UNUSED_ENTRY(87),
	HNS3_RX_PTYPE_UNUSED_ENTRY(88),
	HNS3_RX_PTYPE_UNUSED_ENTRY(89),
	HNS3_RX_PTYPE_UNUSED_ENTRY(90),
	HNS3_RX_PTYPE_UNUSED_ENTRY(91),
	HNS3_RX_PTYPE_UNUSED_ENTRY(92),
	HNS3_RX_PTYPE_UNUSED_ENTRY(93),
	HNS3_RX_PTYPE_UNUSED_ENTRY(94),
	HNS3_RX_PTYPE_UNUSED_ENTRY(95),
	HNS3_RX_PTYPE_UNUSED_ENTRY(96),
	HNS3_RX_PTYPE_UNUSED_ENTRY(97),
	HNS3_RX_PTYPE_UNUSED_ENTRY(98),
	HNS3_RX_PTYPE_UNUSED_ENTRY(99),
	HNS3_RX_PTYPE_UNUSED_ENTRY(100),
	HNS3_RX_PTYPE_UNUSED_ENTRY(101),
	HNS3_RX_PTYPE_UNUSED_ENTRY(102),
	HNS3_RX_PTYPE_UNUSED_ENTRY(103),
	HNS3_RX_PTYPE_UNUSED_ENTRY(104),
	HNS3_RX_PTYPE_UNUSED_ENTRY(105),
	HNS3_RX_PTYPE_UNUSED_ENTRY(106),
	HNS3_RX_PTYPE_UNUSED_ENTRY(107),
	HNS3_RX_PTYPE_UNUSED_ENTRY(108),
	HNS3_RX_PTYPE_UNUSED_ENTRY(109),
	HNS3_RX_PTYPE_UNUSED_ENTRY(110),
	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(120),
	HNS3_RX_PTYPE_UNUSED_ENTRY(121),
	HNS3_RX_PTYPE_UNUSED_ENTRY(122),
	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(132),
	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(140),
	HNS3_RX_PTYPE_UNUSED_ENTRY(141),
	HNS3_RX_PTYPE_UNUSED_ENTRY(142),
	HNS3_RX_PTYPE_UNUSED_ENTRY(143),
	HNS3_RX_PTYPE_UNUSED_ENTRY(144),
	HNS3_RX_PTYPE_UNUSED_ENTRY(145),
	HNS3_RX_PTYPE_UNUSED_ENTRY(146),
	HNS3_RX_PTYPE_UNUSED_ENTRY(147),
	HNS3_RX_PTYPE_UNUSED_ENTRY(148),
	HNS3_RX_PTYPE_UNUSED_ENTRY(149),
	HNS3_RX_PTYPE_UNUSED_ENTRY(150),
	HNS3_RX_PTYPE_UNUSED_ENTRY(151),
	HNS3_RX_PTYPE_UNUSED_ENTRY(152),
	HNS3_RX_PTYPE_UNUSED_ENTRY(153),
	HNS3_RX_PTYPE_UNUSED_ENTRY(154),
	HNS3_RX_PTYPE_UNUSED_ENTRY(155),
	HNS3_RX_PTYPE_UNUSED_ENTRY(156),
	HNS3_RX_PTYPE_UNUSED_ENTRY(157),
	HNS3_RX_PTYPE_UNUSED_ENTRY(158),
	HNS3_RX_PTYPE_UNUSED_ENTRY(159),
	HNS3_RX_PTYPE_UNUSED_ENTRY(160),
	HNS3_RX_PTYPE_UNUSED_ENTRY(161),
	HNS3_RX_PTYPE_UNUSED_ENTRY(162),
	HNS3_RX_PTYPE_UNUSED_ENTRY(163),
	HNS3_RX_PTYPE_UNUSED_ENTRY(164),
	HNS3_RX_PTYPE_UNUSED_ENTRY(165),
	HNS3_RX_PTYPE_UNUSED_ENTRY(166),
	HNS3_RX_PTYPE_UNUSED_ENTRY(167),
	HNS3_RX_PTYPE_UNUSED_ENTRY(168),
	HNS3_RX_PTYPE_UNUSED_ENTRY(169),
	HNS3_RX_PTYPE_UNUSED_ENTRY(170),
	HNS3_RX_PTYPE_UNUSED_ENTRY(171),
	HNS3_RX_PTYPE_UNUSED_ENTRY(172),
	HNS3_RX_PTYPE_UNUSED_ENTRY(173),
	HNS3_RX_PTYPE_UNUSED_ENTRY(174),
	HNS3_RX_PTYPE_UNUSED_ENTRY(175),
	HNS3_RX_PTYPE_UNUSED_ENTRY(176),
	HNS3_RX_PTYPE_UNUSED_ENTRY(177),
	HNS3_RX_PTYPE_UNUSED_ENTRY(178),
	HNS3_RX_PTYPE_UNUSED_ENTRY(179),
	HNS3_RX_PTYPE_UNUSED_ENTRY(180),
	HNS3_RX_PTYPE_UNUSED_ENTRY(181),
	HNS3_RX_PTYPE_UNUSED_ENTRY(182),
	HNS3_RX_PTYPE_UNUSED_ENTRY(183),
	HNS3_RX_PTYPE_UNUSED_ENTRY(184),
	HNS3_RX_PTYPE_UNUSED_ENTRY(185),
	HNS3_RX_PTYPE_UNUSED_ENTRY(186),
	HNS3_RX_PTYPE_UNUSED_ENTRY(187),
	HNS3_RX_PTYPE_UNUSED_ENTRY(188),
	HNS3_RX_PTYPE_UNUSED_ENTRY(189),
	HNS3_RX_PTYPE_UNUSED_ENTRY(190),
	HNS3_RX_PTYPE_UNUSED_ENTRY(191),
	HNS3_RX_PTYPE_UNUSED_ENTRY(192),
	HNS3_RX_PTYPE_UNUSED_ENTRY(193),
	HNS3_RX_PTYPE_UNUSED_ENTRY(194),
	HNS3_RX_PTYPE_UNUSED_ENTRY(195),
	HNS3_RX_PTYPE_UNUSED_ENTRY(196),
	HNS3_RX_PTYPE_UNUSED_ENTRY(197),
	HNS3_RX_PTYPE_UNUSED_ENTRY(198),
	HNS3_RX_PTYPE_UNUSED_ENTRY(199),
	HNS3_RX_PTYPE_UNUSED_ENTRY(200),
	HNS3_RX_PTYPE_UNUSED_ENTRY(201),
	HNS3_RX_PTYPE_UNUSED_ENTRY(202),
	HNS3_RX_PTYPE_UNUSED_ENTRY(203),
	HNS3_RX_PTYPE_UNUSED_ENTRY(204),
	HNS3_RX_PTYPE_UNUSED_ENTRY(205),
	HNS3_RX_PTYPE_UNUSED_ENTRY(206),
	HNS3_RX_PTYPE_UNUSED_ENTRY(207),
	HNS3_RX_PTYPE_UNUSED_ENTRY(208),
	HNS3_RX_PTYPE_UNUSED_ENTRY(209),
	HNS3_RX_PTYPE_UNUSED_ENTRY(210),
	HNS3_RX_PTYPE_UNUSED_ENTRY(211),
	HNS3_RX_PTYPE_UNUSED_ENTRY(212),
	HNS3_RX_PTYPE_UNUSED_ENTRY(213),
	HNS3_RX_PTYPE_UNUSED_ENTRY(214),
	HNS3_RX_PTYPE_UNUSED_ENTRY(215),
	HNS3_RX_PTYPE_UNUSED_ENTRY(216),
	HNS3_RX_PTYPE_UNUSED_ENTRY(217),
	HNS3_RX_PTYPE_UNUSED_ENTRY(218),
	HNS3_RX_PTYPE_UNUSED_ENTRY(219),
	HNS3_RX_PTYPE_UNUSED_ENTRY(220),
	HNS3_RX_PTYPE_UNUSED_ENTRY(221),
	HNS3_RX_PTYPE_UNUSED_ENTRY(222),
	HNS3_RX_PTYPE_UNUSED_ENTRY(223),
	HNS3_RX_PTYPE_UNUSED_ENTRY(224),
	HNS3_RX_PTYPE_UNUSED_ENTRY(225),
	HNS3_RX_PTYPE_UNUSED_ENTRY(226),
	HNS3_RX_PTYPE_UNUSED_ENTRY(227),
	HNS3_RX_PTYPE_UNUSED_ENTRY(228),
	HNS3_RX_PTYPE_UNUSED_ENTRY(229),
	HNS3_RX_PTYPE_UNUSED_ENTRY(230),
	HNS3_RX_PTYPE_UNUSED_ENTRY(231),
	HNS3_RX_PTYPE_UNUSED_ENTRY(232),
	HNS3_RX_PTYPE_UNUSED_ENTRY(233),
	HNS3_RX_PTYPE_UNUSED_ENTRY(234),
	HNS3_RX_PTYPE_UNUSED_ENTRY(235),
	HNS3_RX_PTYPE_UNUSED_ENTRY(236),
	HNS3_RX_PTYPE_UNUSED_ENTRY(237),
	HNS3_RX_PTYPE_UNUSED_ENTRY(238),
	HNS3_RX_PTYPE_UNUSED_ENTRY(239),
	HNS3_RX_PTYPE_UNUSED_ENTRY(240),
	HNS3_RX_PTYPE_UNUSED_ENTRY(241),
	HNS3_RX_PTYPE_UNUSED_ENTRY(242),
	HNS3_RX_PTYPE_UNUSED_ENTRY(243),
	HNS3_RX_PTYPE_UNUSED_ENTRY(244),
	HNS3_RX_PTYPE_UNUSED_ENTRY(245),
	HNS3_RX_PTYPE_UNUSED_ENTRY(246),
	HNS3_RX_PTYPE_UNUSED_ENTRY(247),
	HNS3_RX_PTYPE_UNUSED_ENTRY(248),
	HNS3_RX_PTYPE_UNUSED_ENTRY(249),
	HNS3_RX_PTYPE_UNUSED_ENTRY(250),
	HNS3_RX_PTYPE_UNUSED_ENTRY(251),
	HNS3_RX_PTYPE_UNUSED_ENTRY(252),
	HNS3_RX_PTYPE_UNUSED_ENTRY(253),
	HNS3_RX_PTYPE_UNUSED_ENTRY(254),
	HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};

#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);
	tqp_vector->event_cnt++;

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);
}

static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
	cancel_work_sync(&tqp_vector->rx_group.dim.work);
	cancel_work_sync(&tqp_vector->tx_group.dim.work);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of
	 * interrupts-per-second. GL and RL (Rate Limiter) are the two ways
	 * to achieve interrupt coalescing.
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
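
/* Worked example (a sketch, assuming hns3_rl_usec_to_reg() divides by the
 * 4-usec unit noted above): rl_value = 100 usec yields rl_reg = 25, and the
 * enable bit is only set when both adaptive coalescing flags are off.
 */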

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
	struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;

	tx_coal->adapt_enable = ptx_coal->adapt_enable;
	rx_coal->adapt_enable = prx_coal->adapt_enable;

	tx_coal->int_gl = ptx_coal->int_gl;
	rx_coal->int_gl = prx_coal->int_gl;

	rx_coal->flow_level = prx_coal->flow_level;
	tx_coal->flow_level = ptx_coal->flow_level;

	/* On device version V3 and above, GL can be configured in 1us
	 * units, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = ptx_coal->int_ql;
		rx_coal->int_ql = prx_coal->int_ql;
	}
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

	if (tx_coal->ql_enable)
		hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

	if (rx_coal->ql_enable)
		hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	unsigned int queue_size = kinfo->num_tqps;
	int i, ret;

	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < tc_info->num_tc; i++)
			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
					    tc_info->tqp_offset[i]);
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
#endif
}

static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	if (!netdev->rx_cpu_rmap) {
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				       tqp_vector->vector_irq);
		if (ret) {
			hns3_free_rx_cpu_rmap(netdev);
			return ret;
		}
	}
#endif
	return 0;
}

static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 i;

	for (i = 0; i < priv->vector_num; i++)
		hns3_irq_enable(&priv->tqp_vector[i]);

	for (i = 0; i < priv->vector_num; i++)
		hns3_mask_vector_irq(&priv->tqp_vector[i], 1);

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_enable(h->kinfo.tqp[i]);
}

static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 i;

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	for (i = 0; i < priv->vector_num; i++)
		hns3_mask_vector_irq(&priv->tqp_vector[i], 0);

	for (i = 0; i < priv->vector_num; i++)
		hns3_irq_disable(&priv->tqp_vector[i]);
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	hns3_enable_irqs_and_tqps(netdev);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret) {
		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
		hns3_disable_irqs_and_tqps(netdev);
	}

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netdev_warn(netdev, "net open repeatedly!\n");
		return 0;
	}

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);

	netif_dbg(h, drv, netdev, "net open\n");

	return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct netdev_queue *dev_queue;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring[i].queue_index);
		netdev_tx_reset_queue(dev_queue);
	}
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;

	hns3_disable_irqs_and_tqps(netdev);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during the reset process, because the driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* need to ignore the request to remove the device address, because
	 * we store the device address and other addresses of the uc list
	 * in the function's mac filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC)
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	else if (netdev->flags & IFF_ALLMULTI)
		flags = HNAE3_USER_MPE;

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* When user-mode promiscuous mode is enabled, vlan filtering is
	 * disabled to let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = hns3_get_ops(handle);

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc, ntu;

	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_spare_update() called in tx desc cleaning process.
	 */
	ntc = smp_load_acquire(&tx_spare->last_to_clean);
	ntu = tx_spare->next_to_use;

	if (ntc > ntu)
		return ntc - ntu - 1;

	/* The free tx buffer is divided into two parts, so pick the
	 * larger one.
	 */
	return max(ntc, tx_spare->len - ntu) - 1;
}
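
/* Worked example: with tx_spare->len = 16, last_to_clean = 4 and
 * next_to_use = 10, the free space is split into bytes [10, 15] (6 bytes)
 * and [0, 3] (4 bytes); the function returns max(4, 16 - 10) - 1 = 5, i.e.
 * the larger region minus the one byte kept to tell full from empty.
 */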

static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (!tx_spare ||
	    tx_spare->last_to_clean == tx_spare->next_to_clean)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_tx_spare_space() called in xmit process.
	 */
	smp_store_release(&tx_spare->last_to_clean,
			  tx_spare->next_to_clean);
}

static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
				   struct sk_buff *skb,
				   u32 space)
{
	u32 len = skb->len <= ring->tx_copybreak ? skb->len :
				skb_headlen(skb);

	if (len > ring->tx_copybreak)
		return false;

	if (ALIGN(len, dma_get_cache_alignment()) > space) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}

static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				u32 space)
{
	if (skb->len <= ring->tx_copybreak || !tx_sgl ||
	    (!skb_has_frag_list(skb) &&
	     skb_shinfo(skb)->nr_frags < tx_sgl))
		return false;

	if (space < HNS3_MAX_SGL_SIZE) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}
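
/* Descriptive note (summary, not upstream text): the two helpers above pick
 * between three TX buffer strategies. Small skbs (len <= tx_copybreak) are
 * copied into the spare buffer as a bounce buffer; heavily fragmented skbs
 * (at least tx_sgl frags, or a frag list) are mapped in one go with
 * dma_map_sg() through the spare buffer; everything else is mapped
 * per-fragment in hns3_map_and_fill_desc().
 */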

static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	dma_addr_t dma;
	int order;

	if (!alloc_size)
		goto not_init;

	order = get_order(alloc_size);
	if (order > MAX_PAGE_ORDER) {
		if (net_ratelimit())
			dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceeds max page order\n");
		goto not_init;
	}

	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
				GFP_KERNEL);
	if (!tx_spare) {
		/* The driver can still work without the tx spare buffer */
		dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
		goto devm_kzalloc_error;
	}

	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
				GFP_KERNEL, order);
	if (!page) {
		dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
		goto alloc_pages_error;
	}

	dma = dma_map_page(ring_to_dev(ring), page, 0,
			   PAGE_SIZE << order, DMA_TO_DEVICE);
	if (dma_mapping_error(ring_to_dev(ring), dma)) {
		dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
		goto dma_mapping_error;
	}

	tx_spare->dma = dma;
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
	ring->tx_copybreak = priv->tx_copybreak;
	return;

dma_mapping_error:
	put_page(page);
alloc_pages_error:
	devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
	ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
not_init:
	/* When the driver inits or resets, ring->tx_spare is always NULL;
	 * but when called from hns3_set_ringparam, it's usually not NULL, and
	 * will be restored if hns3_init_all_ring() fails. So it's safe to set
	 * ring->tx_spare to NULL here.
	 */
	ring->tx_spare = NULL;
}

/* Use hns3_tx_spare_space() to make sure there is enough buffer space
 * before calling the function below to allocate the tx buffer.
 */
static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
				 unsigned int size, dma_addr_t *dma,
				 u32 *cb_len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntu = tx_spare->next_to_use;

	size = ALIGN(size, dma_get_cache_alignment());
	*cb_len = size;

	/* The tx spare buffer wraps back here because the free space
	 * at the end of the buffer is not enough.
	 */
	if (ntu + size > tx_spare->len) {
		*cb_len += (tx_spare->len - ntu);
		ntu = 0;
	}

	tx_spare->next_to_use = ntu + size;
	if (tx_spare->next_to_use == tx_spare->len)
		tx_spare->next_to_use = 0;

	*dma = tx_spare->dma + ntu;

	return tx_spare->buf + ntu;
}
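
/* Wrap-around example: with tx_spare->len = 4096, next_to_use = 3968 and an
 * aligned size of 256, ntu + size exceeds the buffer end, so the 128
 * trailing bytes are skipped (*cb_len becomes 256 + 128 = 384) and the
 * allocation starts again at offset 0, with next_to_use ending up at 256.
 */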

static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (len > tx_spare->next_to_use) {
		len -= tx_spare->next_to_use;
		tx_spare->next_to_use = tx_spare->len - len;
	} else {
		tx_spare->next_to_use -= len;
	}
}

static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
				     struct hns3_desc_cb *cb)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc = tx_spare->next_to_clean;
	u32 len = cb->length;

	tx_spare->next_to_clean += len;

	if (tx_spare->next_to_clean >= tx_spare->len) {
		tx_spare->next_to_clean -= tx_spare->len;

		if (tx_spare->next_to_clean) {
			ntc = 0;
			len = tx_spare->next_to_clean;
		}
	}

	/* This tx spare buffer is only really reclaimed after calling
	 * hns3_tx_spare_update(), so it is still safe to use the info in
	 * the tx buffer to do the dma sync or sg unmapping after
	 * tx_spare->next_to_clean is moved forward.
	 */
	if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
		dma_addr_t dma = tx_spare->dma + ntc;

		dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
					DMA_TO_DEVICE);
	} else {
		struct sg_table *sgt = tx_spare->buf + ntc;

		dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
			     DMA_TO_DEVICE);
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}
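
/* Note on the csum_replace_by_diff() calls above: the stack's pseudo-header
 * checksum covers the full payload length, so subtracting l4_paylen leaves a
 * checksum over a zero-length payload; the hardware can then add back the
 * per-segment payload length for each TSO segment it emits.
 */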

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header pointer */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header pointer */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and it
 * is a udp packet whose dest port is one that IANA assigned, the hardware
 * is expected to do the checksum offload, but the hardware will not do the
 * checksum offload when the udp dest port is 4789, 4790 or 6081.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	union l4_hdr_info l4;

	/* On device version V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT) ||
	       l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
		return false;

	return true;
}

static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
				  u32 *ol_type_vlan_len_msec)
{
	u32 l2_len, l3_len, l4_len;
	unsigned char *il2_hdr;
	union l3_hdr_info l3;
	union l4_hdr_info l4;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute OL2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute OL3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

	/* define outer network header type */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb_is_gso(skb))
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
	}

	if (ol4_proto == IPPROTO_UDP)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
	else if (ol4_proto == IPPROTO_GRE)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
}
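
/* Unit example for the length fields above: a 14-byte outer Ethernet header
 * is written as 14 >> 1 == 7 (2-byte units), and a 20-byte outer IPv4
 * header as 20 >> 2 == 5 (4-byte units).
 */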

static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
			     u32 *type_cs_vlan_tso)
{
	if (l3.v4->version == 4) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
	}
}

static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
				   u32 l4_proto, u32 *type_cs_vlan_tso)
{
	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       l4.tcp->doff);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb)) {
			int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);

			return ret ? ret : skb_checksum_help(skb);
		}

		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct udphdr) >> 2));
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct sctphdr) >> 2));
		break;
	default:
		/* drop the tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate the csum with TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack has computed the IP header already; the driver
		 * calculates the l4 checksum in software when not doing TSO.
		 */
		return skb_checksum_help(skb);
	}

	return 0;
}
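
/* Unit example: a TCP header with doff = 5 (20 bytes) is written to the
 * L4LEN field as 5 directly, since doff is already in 4-byte units; a UDP
 * header is always sizeof(struct udphdr) >> 2 == 2.
 */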

static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
			   u8 il4_proto, u32 *type_cs_vlan_tso,
			   u32 *ol_type_vlan_len_msec)
{
	unsigned char *l2_hdr = skb->data;
	u32 l4_proto = ol4_proto;
	union l4_hdr_info l4;
	union l3_hdr_info l3;
	u32 l2_len, l3_len;

	l4.hdr = skb_transport_header(skb);
	l3.hdr = skb_network_header(skb);

	/* handle encapsulation skb */
	if (skb->encapsulation) {
		/* If this is not a UDP/GRE encapsulated skb */
		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
			/* drop the tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate
			 * the csum with TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack has computed the IP header already;
			 * the driver calculates the l4 checksum in software
			 * when not doing TSO.
			 */
			return skb_checksum_help(skb);
		}

		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);

		/* switch to inner header */
		l2_hdr = skb_inner_mac_header(skb);
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = il4_proto;
	}

	hns3_set_l3_type(skb, l3, type_cs_vlan_tso);

	/* compute inner(/normal) L2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - l2_hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

	return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
}

static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct hnae3_handle *handle = tx_ring->tqp->handle;
	struct hnae3_ae_dev *ae_dev;
	struct vlan_ethhdr *vhdr;
	int rc;

	if (!(skb->protocol == htons(ETH_P_8021Q) ||
	      skb_vlan_tag_present(skb)))
		return 0;

	/* Due to a HW limitation on HNAE3_DEVICE_VERSION_V2, if port-based
	 * VLAN insertion is enabled, only one VLAN header is allowed in the
	 * skb, otherwise it will cause a RAS error.
	 */
	ae_dev = hns3_get_ae_dev(handle);
	if (unlikely(skb_vlan_tagged_multi(skb) &&
		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		     handle->port_base_vlan_state ==
		     HNAE3_PORT_BASE_VLAN_ENABLE))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		/* Based on hw strategy, use out_vtag in the two-layer tag
		 * case, and use inner_vtag in the one-tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) &&
		    handle->port_base_vlan_state ==
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			rc = HNS3_OUTER_VLAN_TAG;
		else
			rc = HNS3_INNER_VLAN_TAG;

		skb->protocol = vlan_get_protocol(skb);
		return rc;
	}

	rc = skb_cow_head(skb, 0);
	if (unlikely(rc < 0))
		return rc;

	vhdr = skb_vlan_eth_hdr(skb);
	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
					& VLAN_PRIO_MASK);

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);

	/* Kindly note, due to backward compatibility of the TX descriptor,
	 * HW checksum of the non-IP packets and GSO packets is handled at a
	 * different place in the following code.
	 */
	if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
	    !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
		return false;

	return true;
}

struct hns3_desc_param {
	u32 paylen_ol4cs;
	u32 ol_type_vlan_len_msec;
	u32 type_cs_vlan_tso;
	u16 mss_hw_csum;
	u16 inner_vtag;
	u16 out_vtag;
};

static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
{
	pa->paylen_ol4cs = skb->len;
	pa->ol_type_vlan_len_msec = 0;
	pa->type_cs_vlan_tso = 0;
	pa->mss_hw_csum = 0;
	pa->inner_vtag = 0;
	pa->out_vtag = 0;
}

static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
				 struct sk_buff *skb,
				 struct hns3_desc_param *param)
{
	int ret;

	ret = hns3_handle_vtags(ring, skb);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_vlan_err);
		return ret;
	} else if (ret == HNS3_INNER_VLAN_TAG) {
		param->inner_vtag = skb_vlan_tag_get(skb);
		param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
	} else if (ret == HNS3_OUTER_VLAN_TAG) {
		param->out_vtag = skb_vlan_tag_get(skb);
		param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
			       1);
	}
	return 0;
}

static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
				    struct sk_buff *skb,
				    struct hns3_desc_cb *desc_cb,
				    struct hns3_desc_param *param)
{
	u8 ol4_proto, il4_proto;
	int ret;

	if (hns3_check_hw_tx_csum(skb)) {
		/* set checksum start and offset, defined in 2 Bytes */
		hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
			       skb_checksum_start_offset(skb) >> 1);
		hns3_set_field(param->ol_type_vlan_len_msec,
			       HNS3_TXD_CSUM_OFFSET_S,
			       skb->csum_offset >> 1);
		param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
		return 0;
	}

	skb_reset_mac_len(skb);

	ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_l4_proto_err);
		return ret;
	}

	ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
			      &param->type_cs_vlan_tso,
			      &param->ol_type_vlan_len_msec);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_l2l3l4_err);
		return ret;
	}

	ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
			   &param->type_cs_vlan_tso, &desc_cb->send_bytes);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_tso_err);
		return ret;
	}
	return 0;
}

static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			      struct sk_buff *skb, struct hns3_desc *desc,
			      struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc_param param;
	int ret;

	hns3_init_desc_data(skb, &param);
	ret = hns3_handle_vlan_info(ring, skb, &param);
	if (unlikely(ret < 0))
		return ret;

	desc_cb->send_bytes = skb->len;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
		if (ret)
			return ret;
	}

	/* Set txbd */
	desc->tx.ol_type_vlan_len_msec =
		cpu_to_le32(param.ol_type_vlan_len_msec);
	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
	desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
	desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
	desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
	desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);

	return 0;
}
1698
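/* Fill one or more BDs for a DMA-mapped buffer: a buffer larger than
 * HNS3_MAX_BD_SIZE is split across consecutive descriptors, with the last
 * BD carrying the remainder. Returns the number of BDs consumed.
 */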
static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
			  unsigned int size)
{
#define HNS3_LIKELY_BD_NUM	1

	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	unsigned int frag_buf_num, k;
	int sizeoflast;

	if (likely(size <= HNS3_MAX_BD_SIZE)) {
		desc->addr = cpu_to_le64(dma);
		desc->tx.send_size = cpu_to_le16(size);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
		return HNS3_LIKELY_BD_NUM;
	}

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc = &ring->desc[ring->next_to_use];
	}

	return frag_buf_num;
}

static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
				  unsigned int type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	unsigned int size;
	dma_addr_t dma;

	if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
		struct sk_buff *skb = (struct sk_buff *)priv;

		size = skb_headlen(skb);
		if (!size)
			return 0;

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else if (type & DESC_TYPE_BOUNCE_HEAD) {
		/* Head data has been filled in hns3_handle_tx_bounce(),
		 * just return 0 here.
		 */
		return 0;
	} else {
		skb_frag_t *frag = (skb_frag_t *)priv;

		size = skb_frag_size(frag);
		if (!size)
			return 0;

		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(dev, dma))) {
		hns3_ring_stats_update(ring, sw_err_cnt);
		return -ENOMEM;
	}

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	return hns3_fill_desc(ring, dma, size);
}

static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				    unsigned int bd_num)
{
	unsigned int size;
	int i;

	size = skb_headlen(skb);
	while (size > HNS3_MAX_BD_SIZE) {
		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
		size -= HNS3_MAX_BD_SIZE;

		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	if (size) {
		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		size = skb_frag_size(frag);
		if (!size)
			continue;

		while (size > HNS3_MAX_BD_SIZE) {
			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
			size -= HNS3_MAX_BD_SIZE;

			if (bd_num > HNS3_MAX_TSO_BD_NUM)
				return bd_num;
		}

		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

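/* Count the BDs needed for the whole skb, recursing into its frag list.
 * Counting stops early once bd_num exceeds HNS3_MAX_TSO_BD_NUM, and a frag
 * list nested deeper than HNS3_MAX_RECURSION_LEVEL is reported as UINT_MAX.
 */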
static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				   u8 max_non_tso_bd_num, unsigned int bd_num,
				   unsigned int recursion_level)
{
#define HNS3_MAX_RECURSION_LEVEL	24

	struct sk_buff *frag_skb;

	/* If the total len is within the max bd limit */
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
		   !skb_has_frag_list(skb) &&
		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
		return skb_shinfo(skb)->nr_frags + 1U;

	if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
		return UINT_MAX;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
					bd_num, recursion_level + 1);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return skb_tcp_all_headers(skb);

	return skb_inner_tcp_all_headers(skb);
}

/* HW needs every continuous group of max_non_tso_bd_num buffers to hold more
 * data than the MSS. We simplify this by ensuring that skb_headlen plus the
 * first continuous max_non_tso_bd_num - 1 frags are larger than the GSO
 * header len plus MSS, and that every subsequent run of max_non_tso_bd_num - 1
 * continuous frags is larger than the MSS, except for the last
 * max_non_tso_bd_num - 1 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num, u8 max_non_tso_bd_num)
{
	unsigned int tot_len = 0;
	unsigned int i;

	for (i = 0; i < max_non_tso_bd_num - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first max_non_tso_bd_num frags are greater than
	 * mss + header
	 */
	if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
	 * than mss except the last one.
	 */
	for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + max_non_tso_bd_num - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	u32 i;

	for (i = 0; i < MAX_SKB_FRAGS; i++)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}

static int hns3_skb_linearize(struct hns3_enet_ring *ring,
			      struct sk_buff *skb,
			      unsigned int bd_num)
{
	/* 'bd_num == UINT_MAX' means the skb's fraglist has a
	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
	 */
	if (bd_num == UINT_MAX) {
		hns3_ring_stats_update(ring, over_max_recursion);
		return -ENOMEM;
	}

	/* The skb->len has exceeded the hw limitation, linearization
	 * will not help.
	 */
	if (skb->len > HNS3_MAX_TSO_SIZE ||
	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
		hns3_ring_stats_update(ring, hw_limitation);
		return -ENOMEM;
	}

	if (__skb_linearize(skb)) {
		hns3_ring_stats_update(ring, sw_err_cnt);
		return -ENOMEM;
	}

	return 0;
}

static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
					      max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			goto out;
		}

		if (hns3_skb_linearize(ring, skb, bd_num))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);

		hns3_ring_stats_update(ring, tx_copy);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start the queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the stopped state set by
	 * netif_stop_subqueue above.
	 */
	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	hns3_ring_stats_update(ring, tx_busy);

	return -EBUSY;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		struct hns3_desc *desc = &ring->desc[ring->next_to_use];
		struct hns3_desc_cb *desc_cb;

		memset(desc, 0, sizeof(*desc));

		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);

		desc_cb = &ring->desc_cb[ring->next_to_use];

		if (!desc_cb->dma)
			continue;

		/* unmap the descriptor dma address */
		if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
			dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
					 DMA_TO_DEVICE);
		else if (desc_cb->type &
			 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
			hns3_tx_spare_rollback(ring, desc_cb->length);
		else if (desc_cb->length)
			dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
				       DMA_TO_DEVICE);

		desc_cb->length = 0;
		desc_cb->dma = 0;
		desc_cb->type = DESC_TYPE_UNKNOWN;
	}
}

static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, unsigned int type)
{
	struct sk_buff *frag_skb;
	int i, ret, bd_num = 0;

	ret = hns3_map_and_fill_desc(ring, skb, type);
	if (unlikely(ret < 0))
		return ret;

	bd_num += ret;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}

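/* TX push: copy up to HNS3_MAX_PUSH_BD_NUM just-filled BDs directly into the
 * device's BD memory window (tqp->mem_base) instead of ringing the tail
 * doorbell register.
 */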
static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
{
#define HNS3_BYTES_PER_64BIT	8

	struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
	int offset = 0;

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	do {
		int idx = (ring->next_to_use - num + ring->desc_num) %
			  ring->desc_num;

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_push++;
		u64_stats_update_end(&ring->syncp);
		memcpy(&desc[offset], &ring->desc[idx],
		       sizeof(struct hns3_desc));
		offset++;
	} while (--num);

	__iowrite64_copy(ring->tqp->mem_base, desc,
			 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
			 HNS3_BYTES_PER_64BIT);
}

static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
{
#define HNS3_MEM_DOORBELL_OFFSET	64

	__le64 bd_num = cpu_to_le64((u64)ring->pending_buf);

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	__iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
			 &bd_num, 1);
	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_mem_doorbell += ring->pending_buf;
	u64_stats_update_end(&ring->syncp);
}

static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	/* when tx push is enabled, a packet whose number of BDs is at most
	 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
	 */
	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
		/* This smp_store_release() pairs with smp_load_acquire() in
		 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
		 * is updated.
		 */
		smp_store_release(&ring->last_to_use, ring->next_to_use);
		hns3_tx_push_bd(ring, num);
		return;
	}

	ring->pending_buf += num;

	if (!doorbell) {
		hns3_ring_stats_update(ring, tx_more);
		return;
	}

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
	 */
	smp_store_release(&ring->last_to_use, ring->next_to_use);

	if (ring->tqp->mem_base)
		hns3_tx_mem_doorbell(ring);
	else
		writel(ring->pending_buf,
		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);

	ring->pending_buf = 0;
}

static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
		      struct hns3_desc *desc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!(h->ae_algo->ops->set_tx_hwts_info &&
	      h->ae_algo->ops->set_tx_hwts_info(h, skb)))
		return;

	desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
}

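/* Bounce path: copy the linear head (or, when skb->len is within
 * tx_copybreak, the whole skb) into the pre-mapped TX spare buffer, so only
 * a dma_sync is needed instead of a fresh dma_map_single() for the head.
 */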
static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
				 struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	unsigned int type = DESC_TYPE_BOUNCE_HEAD;
	unsigned int size = skb_headlen(skb);
	dma_addr_t dma;
	int bd_num = 0;
	u32 cb_len;
	void *buf;
	int ret;

	if (skb->len <= ring->tx_copybreak) {
		size = skb->len;
		type = DESC_TYPE_BOUNCE_ALL;
	}

	/* hns3_can_use_tx_bounce() is called to ensure the below
	 * function can always return the tx buffer.
	 */
	buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);

	ret = skb_copy_bits(skb, 0, buf, size);
	if (unlikely(ret < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, copy_bits_err);
		return ret;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = type;

	bd_num += hns3_fill_desc(ring, dma, size);

	if (type == DESC_TYPE_BOUNCE_HEAD) {
		ret = hns3_fill_skb_to_desc(ring, skb,
					    DESC_TYPE_BOUNCE_HEAD);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	dma_sync_single_for_device(ring_to_dev(ring), dma, size,
				   DMA_TO_DEVICE);

	hns3_ring_stats_update(ring, tx_bounce);

	return bd_num;
}

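/* SGL path: build a scatter-gather table for the whole skb inside the TX
 * spare buffer and hand it to dma_map_sg(), which lets the IOMMU map a
 * heavily fragmented skb in one operation.
 */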
static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
			      struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
	struct sg_table *sgt;
	int bd_num = 0;
	dma_addr_t dma;
	u32 cb_len, i;
	int nents;

	if (skb_has_frag_list(skb))
		nfrag = HNS3_MAX_TSO_BD_NUM;

	/* hns3_can_use_tx_sgl() is called to ensure the below
	 * function can always return the tx buffer.
	 */
	sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
				  &dma, &cb_len);

	/* the scatterlist follows the sg table */
	sgt->sgl = (struct scatterlist *)(sgt + 1);
	sg_init_table(sgt->sgl, nfrag);
	nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
	if (unlikely(nents < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, skb2sgl_err);
		return -ENOMEM;
	}

	sgt->orig_nents = nents;
	sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
				DMA_TO_DEVICE);
	if (unlikely(!sgt->nents)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, map_sg_err);
		return -ENOMEM;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = DESC_TYPE_SGL_SKB;

	for (i = 0; i < sgt->nents; i++)
		bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
					 sg_dma_len(sgt->sgl + i));
	hns3_ring_stats_update(ring, tx_sgl);

	return bd_num;
}

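/* Choose how to fill the BDs for this skb: prefer the SGL or bounce paths
 * backed by the TX spare buffer when one is configured and has room,
 * otherwise map the head and frags directly.
 */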
static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
				    struct sk_buff *skb)
{
	u32 space;

	if (!ring->tx_spare)
		goto out;

	space = hns3_tx_spare_space(ring);

	if (hns3_can_use_tx_sgl(ring, skb, space))
		return hns3_handle_tx_sgl(ring, skb);

	if (hns3_can_use_tx_bounce(ring, skb, space))
		return hns3_handle_tx_bounce(ring, skb);

out:
	return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}

static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				struct hns3_desc_cb *desc_cb,
				int next_to_use_head)
{
	int ret;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
				 desc_cb);
	if (unlikely(ret < 0))
		goto fill_err;

	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
	 * zero, which is unlikely, and 'ret > 0' is the number of tx BDs
	 * that need to be notified to the hw.
	 */
	ret = hns3_handle_desc_filling(ring, skb);
	if (likely(ret > 0))
		return ret;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);
	return ret;
}

netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct netdev_queue *dev_queue;
	int pre_ntu, ret;
	bool doorbell;

	/* Hardware can only handle frames longer than 32 bytes,
	 * so pad short frames.
	 */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());

		hns3_ring_stats_update(ring, sw_err_cnt);

		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);

	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
	if (unlikely(ret <= 0))
		goto out_err_tx_ok;

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);

	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
		cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	skb_tx_timestamp(skb);

	/* All descriptors are filled; notify the stack and ring the
	 * doorbell if needed.
	 */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, ret, doorbell);

	return NETDEV_TX_OK;

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
	char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
		netdev_info(netdev, "already using mac address %s\n",
			    format_mac_addr_sa);
		return 0;
	}

	/* For a VF device, if there is a perm_addr, the user is not
	 * allowed to change the address.
	 */
	if (!hns3_is_phys_func(h->pdev) &&
	    !is_zero_ether_addr(netdev->perm_addr)) {
		hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
		hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
		netdev_err(netdev, "has permanent MAC %s, user MAC %s is not allowed\n",
			   format_mac_addr_perm, format_mac_addr_sa);
		return -EPERM;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	eth_hw_addr_set(netdev, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_hwtstamp_get(struct net_device *netdev,
				 struct kernel_hwtstamp_config *config)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->hwtstamp_get)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->hwtstamp_get(h, config);
}

static int hns3_nic_hwtstamp_set(struct net_device *netdev,
				 struct kernel_hwtstamp_config *config,
				 struct netlink_ext_ack *extack)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->hwtstamp_set)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->hwtstamp_set(h, config, extack);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool enable;
	int ret;

	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
		enable = !!(features & NETIF_F_GRO_HW);
		ret = h->ae_algo->ops->set_gro_en(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		enable = !!(features & NETIF_F_NTUPLE);
		h->ae_algo->ops->enable_fd(h, enable);
	}

	if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
	    h->ae_algo->ops->cls_flower_active(h)) {
		netdev_err(netdev,
			   "there are offloaded TC filters active, cannot disable HW TC offload\n");
		return -EINVAL;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
		ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static netdev_features_t hns3_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
#define HNS3_MAX_HDR_LEN	480U
#define HNS3_MAX_L4_HDR_LEN	60U

	size_t len;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb->encapsulation)
		len = skb_inner_transport_offset(skb);
	else
		len = skb_transport_offset(skb);

	/* Assume the L4 header is 60 bytes, as TCP is the only protocol
	 * with a flexible header length, and its max len is 60 bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
	 */
	if (len > HNS3_MAX_HDR_LEN)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

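/* Snapshot one ring's stats under its u64_stats seqcount and fold them into
 * @stats; every TX-side error counter feeds both tx_dropped and tx_errors.
 */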
static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
			     struct hns3_enet_ring *ring, bool is_tx)
{
	struct ring_stats ring_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		ring_stats = ring->stats;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	if (is_tx) {
		stats->tx_bytes += ring_stats.tx_bytes;
		stats->tx_packets += ring_stats.tx_pkts;
		stats->tx_dropped += ring_stats.sw_err_cnt;
		stats->tx_dropped += ring_stats.tx_vlan_err;
		stats->tx_dropped += ring_stats.tx_l4_proto_err;
		stats->tx_dropped += ring_stats.tx_l2l3l4_err;
		stats->tx_dropped += ring_stats.tx_tso_err;
		stats->tx_dropped += ring_stats.over_max_recursion;
		stats->tx_dropped += ring_stats.hw_limitation;
		stats->tx_dropped += ring_stats.copy_bits_err;
		stats->tx_dropped += ring_stats.skb2sgl_err;
		stats->tx_dropped += ring_stats.map_sg_err;
		stats->tx_errors += ring_stats.sw_err_cnt;
		stats->tx_errors += ring_stats.tx_vlan_err;
		stats->tx_errors += ring_stats.tx_l4_proto_err;
		stats->tx_errors += ring_stats.tx_l2l3l4_err;
		stats->tx_errors += ring_stats.tx_tso_err;
		stats->tx_errors += ring_stats.over_max_recursion;
		stats->tx_errors += ring_stats.hw_limitation;
		stats->tx_errors += ring_stats.copy_bits_err;
		stats->tx_errors += ring_stats.skb2sgl_err;
		stats->tx_errors += ring_stats.map_sg_err;
	} else {
		stats->rx_bytes += ring_stats.rx_bytes;
		stats->rx_packets += ring_stats.rx_pkts;
		stats->rx_dropped += ring_stats.l2_err;
		stats->rx_errors += ring_stats.l2_err;
		stats->rx_errors += ring_stats.l3l4_csum_err;
		stats->rx_crc_errors += ring_stats.l2_err;
		stats->multicast += ring_stats.rx_multicast;
		stats->rx_length_errors += ring_stats.err_pkt_len;
	}
}

static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct rtnl_link_stats64 ring_total_stats;
	struct hns3_enet_ring *ring;
	int idx;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle);

	memset(&ring_total_stats, 0, sizeof(ring_total_stats));
	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = &priv->ring[idx];
		hns3_fetch_stats(&ring_total_stats, ring, true);

		/* fetch the rx stats */
		ring = &priv->ring[idx + queue_num];
		hns3_fetch_stats(&ring_total_stats, ring, false);
	}

	stats->tx_bytes = ring_total_stats.tx_bytes;
	stats->tx_packets = ring_total_stats.tx_packets;
	stats->rx_bytes = ring_total_stats.rx_bytes;
	stats->rx_packets = ring_total_stats.rx_packets;

	stats->rx_errors = ring_total_stats.rx_errors;
	stats->multicast = ring_total_stats.multicast;
	stats->rx_length_errors = ring_total_stats.rx_length_errors;
	stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = ring_total_stats.tx_errors;
	stats->rx_dropped = ring_total_stats.rx_dropped;
	stats->tx_dropped = ring_total_stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_knic_private_info *kinfo;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	struct hnae3_handle *h;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	h = hns3_get_handle(netdev);
	kinfo = &h->kinfo;

	netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);

	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
}

static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
				    struct flow_cls_offload *flow)
{
	int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
	struct hnae3_handle *h = hns3_get_handle(priv->netdev);

	switch (flow->command) {
	case FLOW_CLS_REPLACE:
		if (h->ae_algo->ops->add_cls_flower)
			return h->ae_algo->ops->add_cls_flower(h, flow, tc);
		break;
	case FLOW_CLS_DESTROY:
		if (h->ae_algo->ops->del_cls_flower)
			return h->ae_algo->ops->del_cls_flower(h, flow);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct hns3_nic_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return hns3_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(hns3_block_cb_list);

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct hns3_nic_priv *priv = netdev_priv(dev);
	int ret;

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		ret = hns3_setup_tc(dev, type_data);
		break;
	case TC_SETUP_BLOCK:
		ret = flow_block_cb_setup_simple(type_data,
						 &hns3_block_cb_list,
						 hns3_setup_tc_block_cb,
						 priv, priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	netif_dbg(h, drv, netdev,
		  "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
		  vf, vlan, qos, ntohs(vlan_proto));

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!handle->ae_algo->ops->set_vf_spoofchk)
		return -EOPNOTSUPP;

	return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
}

static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	if (!handle->ae_algo->ops->set_vf_trust)
		return -EOPNOTSUPP;

	return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	netif_dbg(h, drv, netdev,
		  "change mtu from %u to %d\n", netdev->mtu, new_mtu);

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
	else
		WRITE_ONCE(netdev->mtu, new_mtu);

	return ret;
}

static int hns3_get_timeout_queue(struct net_device *ndev)
{
	unsigned int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->num_tx_queues; i++) {
		unsigned int timedout_ms;
		struct netdev_queue *q;

		q = netdev_get_tx_queue(ndev, i);
		timedout_ms = netif_xmit_timeout_ms(q);
		if (timedout_ms) {
#ifdef CONFIG_BQL
			struct dql *dql = &q->dql;

			netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
				    dql->last_obj_cnt, dql->num_queued,
				    dql->adj_limit, dql->num_completed);
#endif
			netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
				    q->state, timedout_ms);
			break;
		}
	}

	return i;
}

static void hns3_dump_queue_stats(struct net_device *ndev,
				  struct hns3_enet_ring *tx_ring,
				  int timeout_queue)
{
	struct napi_struct *napi = &tx_ring->tqp_vector->napi;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
		    priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
		    tx_ring->next_to_clean, napi->state);

	netdev_info(ndev,
		    "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
		    tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);

	netdev_info(ndev,
		    "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);

	netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
		    tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
}

static void hns3_dump_queue_reg(struct net_device *ndev,
				struct hns3_enet_ring *tx_ring)
{
	netdev_info(ndev,
		    "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
		    readl(tx_ring->tqp_vector->mask_addr));
	netdev_info(ndev,
		    "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
		    hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
		    hns3_tqp_read_reg(tx_ring,
				      HNS3_RING_TX_RING_EBD_OFFSET_REG));
}

static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hns3_enet_ring *tx_ring;
	u32 timeout_queue;

	timeout_queue = hns3_get_timeout_queue(ndev);
	if (timeout_queue >= ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	priv->tx_timeout_count++;

	tx_ring = &priv->ring[timeout_queue];
	hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);

	/* When the MAC receives many pause frames continuously, it is unable
	 * to send packets, which may cause a tx timeout.
	 */
	if (h->ae_algo->ops->get_mac_stats) {
		struct hns3_mac_stats mac_stats;

		h->ae_algo->ops->get_mac_stats(h, &mac_stats);
		netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
			    mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
	}

	hns3_dump_queue_reg(ndev, tx_ring);

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	/* request the reset, and let hclge determine which reset level
	 * should be performed
	 */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h->pdev, h);
}

#ifdef CONFIG_RFS_ACCEL
static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct hnae3_handle *h = hns3_get_handle(dev);
	struct flow_keys fkeys;

	if (!h->ae_algo->ops->add_arfs_entry)
		return -EOPNOTSUPP;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
		return -EPROTONOSUPPORT;

	if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
	     fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
	    (fkeys.basic.ip_proto != IPPROTO_TCP &&
	     fkeys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
}
#endif

static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
				  struct ifla_vf_info *ivf)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->get_vf_config)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_vf_config(h, vf, ivf);
}

static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
				      int link_state)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->set_vf_link_state)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
}

static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
				int min_tx_rate, int max_tx_rate)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->set_vf_rate)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
					    false);
}

static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	if (!h->ae_algo->ops->set_vf_mac)
		return -EOPNOTSUPP;

	if (is_multicast_ether_addr(mac)) {
		hnae3_format_mac_addr(format_mac_addr, mac);
		netdev_err(netdev,
			   "Invalid MAC:%s specified. Could not set MAC\n",
			   format_mac_addr);
		return -EINVAL;
	}

	return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}

#define HNS3_INVALID_DSCP	0xff
#define HNS3_DSCP_SHIFT		2

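/* Extract the DSCP field from the IPv4/IPv6 header, looking through a VLAN
 * tag if present; non-IP packets yield HNS3_INVALID_DSCP.
 */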
static u8 hns3_get_skb_dscp(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	u8 dscp = HNS3_INVALID_DSCP;

	if (protocol == htons(ETH_P_8021Q))
		protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
	else if (protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;

	return dscp;
}

static u16 hns3_nic_select_queue(struct net_device *netdev,
				 struct sk_buff *skb,
				 struct net_device *sb_dev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 dscp;

	if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
	    !h->ae_algo->ops->get_dscp_prio)
		goto out;

	dscp = hns3_get_skb_dscp(skb);
	if (unlikely(dscp >= HNAE3_MAX_DSCP))
		goto out;

	skb->priority = h->kinfo.dscp_prio[dscp];
	if (skb->priority == HNAE3_PRIO_ID_INVALID)
		skb->priority = 0;

out:
	return netdev_pick_tx(netdev, skb, sb_dev);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_eth_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_features_check	= hns3_features_check,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
	.ndo_set_vf_spoofchk	= hns3_set_vf_spoofchk,
	.ndo_set_vf_trust	= hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= hns3_rx_flow_steer,
#endif
	.ndo_get_vf_config	= hns3_nic_get_vf_config,
	.ndo_set_vf_link_state	= hns3_nic_set_vf_link_state,
	.ndo_set_vf_rate	= hns3_nic_set_vf_rate,
	.ndo_set_vf_mac		= hns3_nic_set_vf_mac,
	.ndo_select_queue	= hns3_nic_select_queue,
	.ndo_hwtstamp_get	= hns3_nic_hwtstamp_get,
	.ndo_hwtstamp_set	= hns3_nic_hwtstamp_set,
};

bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
	case HNAE3_DEV_ID_200G_RDMA:
		return true;
	case HNAE3_DEV_ID_VF:
	case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "unrecognized pci device-id %u",
			 dev_id);
	}

	return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	pci_set_drvdata(pdev, ae_dev);

	ret = hnae3_register_ae_dev(ae_dev);
	if (ret)
		pci_set_drvdata(pdev, NULL);

	return ret;
}

/**
 * hns3_clean_vf_config
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs allocated
 *
 * Clean residual VF config after disabling SR-IOV
 **/
static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (ae_dev->ops->clean_vf_config)
		ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	pci_set_drvdata(pdev, NULL);
}

/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		int num_vfs_pre = pci_num_vf(pdev);

		pci_disable_sriov(pdev);
		hns3_clean_vf_config(pdev, num_vfs_pre);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	pci_set_drvdata(pdev, NULL);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

static int __maybe_unused hns3_suspend(struct device *dev)
{
	struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);

	if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
		dev_info(dev, "Begin to suspend.\n");
		if (ae_dev->ops && ae_dev->ops->reset_prepare)
			ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
	}

	return 0;
}

static int __maybe_unused hns3_resume(struct device *dev)
{
	struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);

	if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
		dev_info(dev, "Begin to resume.\n");
		if (ae_dev->ops && ae_dev->ops->reset_done)
			ae_dev->ops->reset_done(ae_dev);
	}

	return 0;
}

static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	pci_ers_result_t ret;

	dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev || !ae_dev->ops) {
		dev_err(&pdev->dev,
			"Can't recover - error happened before device initialized\n");
		return PCI_ERS_RESULT_NONE;
	}

	if (ae_dev->ops->handle_hw_ras_error)
		ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
	else
		return PCI_ERS_RESULT_NONE;

	return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	const struct hnae3_ae_ops *ops;
	enum hnae3_reset_type reset_type;
	struct device *dev = &pdev->dev;

	if (!ae_dev || !ae_dev->ops)
		return PCI_ERS_RESULT_NONE;

	ops = ae_dev->ops;
	/* request the reset */
	if (ops->reset_event && ops->get_reset_level &&
	    ops->set_default_reset_request) {
		if (ae_dev->hw_err_reset_req) {
			reset_type = ops->get_reset_level(ae_dev,
							  &ae_dev->hw_err_reset_req);
			ops->set_default_reset_request(ae_dev, reset_type);
			dev_info(dev, "requesting reset due to PCI error\n");
			ops->reset_event(pdev, NULL);
		}

		return PCI_ERS_RESULT_RECOVERED;
	}

	return PCI_ERS_RESULT_DISCONNECT;
}

static void hns3_reset_prepare(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "FLR prepare\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
		ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
}

static void hns3_reset_done(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "FLR done\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
		ae_dev->ops->reset_done(ae_dev);
}

static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset     = hns3_slot_reset,
	.reset_prepare  = hns3_reset_prepare,
	.reset_done     = hns3_reset_done,
};

static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.driver.pm  = &hns3_pm_ops,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};

3355 /* set default feature to hns3 */
hns3_set_default_feature(struct net_device * netdev)3356 static void hns3_set_default_feature(struct net_device *netdev)
3357 {
3358 struct hnae3_handle *h = hns3_get_handle(netdev);
3359 struct pci_dev *pdev = h->pdev;
3360 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3361
3362 netdev->priv_flags |= IFF_UNICAST_FLT;
3363
3364 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3365 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3366 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3367 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3368 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3369 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3370
3371 if (hnae3_ae_dev_gro_supported(ae_dev))
3372 netdev->features |= NETIF_F_GRO_HW;
3373
3374 if (hnae3_ae_dev_fd_supported(ae_dev))
3375 netdev->features |= NETIF_F_NTUPLE;
3376
3377 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
3378 netdev->features |= NETIF_F_GSO_UDP_L4;
3379
3380 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
3381 netdev->features |= NETIF_F_HW_CSUM;
3382 else
3383 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3384
3385 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps))
3386 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3387
3388 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps))
3389 netdev->features |= NETIF_F_HW_TC;
3390
3391 netdev->hw_features |= netdev->features;
3392 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
3393 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3394
3395 netdev->vlan_features |= netdev->features &
3396 ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
3397 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE |
3398 NETIF_F_HW_TC);
3399
3400 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
3401
3402 /* The device_version V3 hardware can't offload the checksum for IP in
3403 * GRE packets, but can do it for NvGRE. So default to disable the
3404 * checksum and GSO offload for GRE.
3405 */
3406 if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
3407 netdev->features &= ~NETIF_F_GSO_GRE;
3408 netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
3409 }
3410 }
3411
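/* Allocate an rx buffer for @cb: take a frag from the page pool when one is
 * attached to the ring, otherwise fall back to a freshly allocated page whose
 * refcount is inflated up-front so the driver can hand fragments to the stack
 * and track its own share via pagecnt_bias.
 */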
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hns3_page_order(ring);
	struct page *p;

	if (ring->page_pool) {
		p = page_pool_dev_alloc_frag(ring->page_pool,
					     &cb->page_offset,
					     hns3_buf_size(ring));
		if (unlikely(!p))
			return -ENOMEM;

		cb->priv = p;
		cb->buf = page_address(p);
		cb->dma = page_pool_get_dma_addr(p);
		cb->type = DESC_TYPE_PP_FRAG;
		cb->reuse_flag = 0;
		return 0;
	}

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hns3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;
	page_ref_add(p, USHRT_MAX - 1);
	cb->pagecnt_bias = USHRT_MAX;

	return 0;
}

static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb, int budget)
{
	if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
			DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
		napi_consume_skb(cb->priv, budget);
	else if (!HNAE3_IS_TX_RING(ring)) {
		if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
			__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
		else if (cb->type & DESC_TYPE_PP_FRAG)
			page_pool_put_full_page(ring->page_pool, cb->priv,
						false);
	}
	memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
		return -EIO;

	return 0;
}

static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
	else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
			     DESC_TYPE_SGL_SKB))
		hns3_tx_spare_reclaim_cb(ring, cb);
}

static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
	ring->desc_cb[i].refill = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
				    int budget)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb, budget);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i, 0);
}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
				     struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret || ring->page_pool)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb, 0);
out:
	return ret;
}

static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
	ring->desc_cb[i].refill = 1;

	return 0;
}

/* Allocate memory for raw packet buffers, and map them for DMA */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_and_attach_buffer(ring, i);
		if (ret)
			goto out_buffer_fail;

		if (!(i % HNS3_RESCHED_BD_NUM))
			cond_resched();
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j, 0);
	return ret;
}

/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc_cb[i].refill = 1;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc_cb[i].refill = 1;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;

	dma_sync_single_for_device(ring_to_dev(ring),
				   ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
				   hns3_buf_size(ring),
				   DMA_FROM_DEVICE);
}

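/* Reclaim completed Tx descriptors between next_to_clean and last_to_use,
 * accumulating the freed packet and byte counts into @pkts and @bytes.
 * Stops at the first BD whose valid bit is still set, i.e. one that
 * hardware has not finished with yet.
 */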
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
				  int *bytes, int *pkts, int budget)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_doorbell().
	 */
	int ltu = smp_load_acquire(&ring->last_to_use);
	int ntc = ring->next_to_clean;
	struct hns3_desc_cb *desc_cb;
	bool reclaimed = false;
	struct hns3_desc *desc;

	while (ltu != ntc) {
		desc = &ring->desc[ntc];

		if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
		    BIT(HNS3_TXD_VLD_B))
			break;

		desc_cb = &ring->desc_cb[ntc];

		if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
				     DESC_TYPE_BOUNCE_HEAD |
				     DESC_TYPE_SGL_SKB)) {
			(*pkts)++;
			(*bytes) += desc_cb->send_bytes;
		}

		/* desc_cb will be cleaned after hns3_free_buffer_detach */
		hns3_free_buffer_detach(ring, ntc, budget);

		if (++ntc == ring->desc_num)
			ntc = 0;

		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ntc]);
		reclaimed = true;
	}

	if (unlikely(!reclaimed))
		return false;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * ring_space called by hns3_nic_net_xmit.
	 */
	smp_store_release(&ring->next_to_clean, ntc);

	hns3_tx_spare_update(ring);

	return true;
}

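/* Tx completion handler: reclaim finished descriptors, update the per-ring
 * and per-vector statistics, report completions to the BQL queue, and wake
 * the Tx queue once enough descriptors are free again.
 */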
void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct netdev_queue *dev_queue;
	int bytes, pkts;

	bytes = 0;
	pkts = 0;

	if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
		return;

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(netif_carrier_ok(netdev) &&
		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
}

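/* Return the number of Rx descriptors that currently have no buffer
 * attached; the refill flag distinguishes a completely unfilled ring
 * from a completely full one when next_to_clean == next_to_use.
 */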
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
		return ring->desc_num;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

/* Return true if there is any allocation failure */
static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
				      int cleaned_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleaned_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			hns3_ring_stats_update(ring, reuse_pg_cnt);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				hns3_ring_stats_update(ring, sw_err_cnt);

				hns3_rl_err(ring_to_netdev(ring),
					    "alloc rx buffer failed: %d\n",
					    ret);

				writel(i, ring->tqp->io_base +
					HNS3_RING_RX_RING_HEAD_REG);
				return true;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);

			hns3_ring_stats_update(ring, non_reuse_pg);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
	return false;
}

static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
	return page_count(cb->priv) == cb->pagecnt_bias;
}

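/* For small fragments (at most rx_copybreak bytes), copy the payload into
 * a freshly allocated napi frag so the original Rx page can be marked for
 * reuse immediately instead of being handed to the stack.
 */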
static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
				    struct hns3_enet_ring *ring,
				    int pull_len,
				    struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
	u32 frag_offset = desc_cb->page_offset + pull_len;
	int size = le16_to_cpu(desc->rx.size);
	u32 frag_size = size - pull_len;
	void *frag = napi_alloc_frag(frag_size);

	if (unlikely(!frag)) {
		hns3_ring_stats_update(ring, frag_alloc_err);

		hns3_rl_err(ring_to_netdev(ring),
			    "failed to allocate rx frag\n");
		return -ENOMEM;
	}

	desc_cb->reuse_flag = 1;
	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
	skb_add_rx_frag(skb, i, virt_to_page(frag),
			offset_in_page(frag), frag_size, frag_size);

	hns3_ring_stats_update(ring, frag_alloc);
	return 0;
}

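/* Attach the current Rx buffer to @skb as frag @i and decide whether the
 * backing page can be flipped (page_offset toggled) and reused for a later
 * packet, copied via the copybreak path, or released back to the allocator.
 */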
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
	u32 frag_offset = desc_cb->page_offset + pull_len;
	int size = le16_to_cpu(desc->rx.size);
	u32 truesize = hns3_buf_size(ring);
	u32 frag_size = size - pull_len;
	int ret = 0;
	bool reused;

	if (ring->page_pool) {
		skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
				frag_size, truesize);
		return;
	}

	/* Avoid re-using remote or pfmem page */
	if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
		goto out;

	reused = hns3_can_reuse_page(desc_cb);

	/* Rx page can be reused when:
	 * 1. Rx page is only owned by the driver when page_offset
	 *    is zero, which means 0 @ truesize will be used by
	 *    stack after skb_add_rx_frag() is called, and the rest
	 *    of rx page can be reused by driver.
	 * Or
	 * 2. Rx page is only owned by the driver when page_offset
	 *    is non-zero, which means page_offset @ truesize will
	 *    be used by stack after skb_add_rx_frag() is called,
	 *    and 0 @ truesize can be reused by driver.
	 */
	if ((!desc_cb->page_offset && reused) ||
	    ((desc_cb->page_offset + truesize + truesize) <=
	     hns3_page_size(ring) && desc_cb->page_offset)) {
		desc_cb->page_offset += truesize;
		desc_cb->reuse_flag = 1;
	} else if (desc_cb->page_offset && reused) {
		desc_cb->page_offset = 0;
		desc_cb->reuse_flag = 1;
	} else if (frag_size <= ring->rx_copybreak) {
		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
		if (!ret)
			return;
	}

out:
	desc_cb->pagecnt_bias--;

	if (unlikely(!desc_cb->pagecnt_bias)) {
		page_ref_add(desc_cb->priv, USHRT_MAX);
		desc_cb->pagecnt_bias = USHRT_MAX;
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
			frag_size, truesize);

	if (unlikely(!desc_cb->reuse_flag))
		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
}

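/* Finish an skb coalesced by hardware GRO: walk any VLAN headers, rebuild
 * the TCP pseudo-header checksum for IPv4/IPv6, and fill in the GSO fields
 * so the stack can resegment the packet when forwarding.
 */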
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
	__be16 type = skb->protocol;
	struct tcphdr *th;
	u32 depth = 0;

	while (eth_type_vlan(type)) {
		struct vlan_hdr *vh;

		if ((depth + VLAN_HLEN) > skb_headlen(skb))
			return -EFAULT;

		vh = (struct vlan_hdr *)(skb->data + depth);
		type = vh->h_vlan_encapsulated_proto;
		depth += VLAN_HLEN;
	}

	skb_set_network_header(skb, depth);

	if (type == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		depth += sizeof(struct iphdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
					  iph->daddr, 0);
	} else if (type == htons(ETH_P_IPV6)) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		depth += sizeof(struct ipv6hdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
					  &iph->daddr, 0);
	} else {
		hns3_rl_err(skb->dev,
			    "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
			    be16_to_cpu(type), depth);
		return -EFAULT;
	}

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	trace_hns3_gro(skb);

	return 0;
}

static void hns3_checksum_complete(struct hns3_enet_ring *ring,
				   struct sk_buff *skb, u32 ptype, u16 csum)
{
	if (ptype == HNS3_INVALID_PTYPE ||
	    hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
		return;

	hns3_ring_stats_update(ring, csum_complete);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = csum_unfold((__force __sum16)csum);
}

static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
				u32 ol_info, u32 ptype)
{
	int l3_type, l4_type;
	int ol4_type;

	if (ptype != HNS3_INVALID_PTYPE) {
		skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
		skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;

		return;
	}

	ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		fallthrough;
	case HNS3_OL4_TYPE_NO_TUN:
		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					  HNS3_RXD_L3ID_S);
		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
					  HNS3_RXD_L4ID_S);
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
		     l3_type == HNS3_L3_TYPE_IPV6) &&
		    (l4_type == HNS3_L4_TYPE_UDP ||
		     l4_type == HNS3_L4_TYPE_TCP ||
		     l4_type == HNS3_L4_TYPE_SCTP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}

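/* Set the skb checksum state from the Rx descriptor: prefer the full
 * CHECKSUM_COMPLETE value when the packet type supports it, otherwise
 * report CHECKSUM_UNNECESSARY based on the hardware L3/L4 validation bits.
 */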
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     u32 l234info, u32 bd_base_info, u32 ol_info,
			     u16 csum)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u32 ptype = HNS3_INVALID_PTYPE;

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
		ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					HNS3_RXD_PTYPE_S);

	hns3_checksum_complete(ring, skb, ptype, csum);

	/* check if hardware has done the checksum */
	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
		return;

	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
				 BIT(HNS3_RXD_OL3E_B) |
				 BIT(HNS3_RXD_OL4E_B)))) {
		skb->ip_summed = CHECKSUM_NONE;
		hns3_ring_stats_update(ring, l3l4_csum_err);

		return;
	}

	hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
}

static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	if (skb_has_frag_list(skb))
		napi_gro_flush(&ring->tqp_vector->napi, false);

	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	struct pci_dev *pdev = ring->tqp->handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(*vlan_tag & VLAN_VID_MASK))
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return (*vlan_tag != 0);
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2
#define HNS3_STRP_BOTH		0x3

	/* Hardware always inserts the VLAN tag into the RX descriptor when
	 * it strips the tag from the packet, so the driver has to determine
	 * which tag to report to the stack.
	 */
	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		if (handle->port_base_vlan_state !=
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		if (handle->port_base_vlan_state !=
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	case HNS3_STRP_BOTH:
		if (handle->port_base_vlan_state ==
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		else
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return true;
	default:
		return false;
	}
}

static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
	ring->desc[ring->next_to_clean].rx.bd_base_info &=
		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
	ring->desc_cb[ring->next_to_clean].refill = 0;
	ring->next_to_clean += 1;

	if (unlikely(ring->next_to_clean == ring->desc_num))
		ring->next_to_clean = 0;
}

static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
			  unsigned char *va)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
	struct net_device *netdev = ring_to_netdev(ring);
	struct sk_buff *skb;

	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
	skb = ring->skb;
	if (unlikely(!skb)) {
		hns3_rl_err(netdev, "alloc rx skb fail\n");
		hns3_ring_stats_update(ring, sw_err_cnt);

		return -ENOMEM;
	}

	trace_hns3_rx_desc(ring);
	prefetchw(skb->data);

	ring->pending_buf = 1;
	ring->frag_num = 0;
	ring->tail_skb = NULL;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is reusable */
		if (dev_page_is_reusable(desc_cb->priv))
			desc_cb->reuse_flag = 1;
		else if (desc_cb->type & DESC_TYPE_PP_FRAG)
			page_pool_put_full_page(ring->page_pool, desc_cb->priv,
						false);
		else /* This page cannot be reused so discard it */
			__page_frag_cache_drain(desc_cb->priv,
						desc_cb->pagecnt_bias);

		hns3_rx_ring_move_fw(ring);
		return 0;
	}

	if (ring->page_pool)
		skb_mark_for_recycle(skb);

	hns3_ring_stats_update(ring, seg_pkt_cnt);

	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
	hns3_rx_ring_move_fw(ring);

	return 0;
}

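/* Consume the remaining BDs of a multi-BD packet, attaching each buffer as
 * a frag of ring->skb. Once MAX_SKB_FRAGS is reached, further buffers go
 * into fresh skbs chained on the frag_list. Returns -ENXIO when the next
 * BD is not yet valid (or a fraglist skb cannot be allocated), so the
 * caller can retry on the next poll.
 */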
static int hns3_add_frag(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct sk_buff *head_skb = skb;
	struct sk_buff *new_skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;

	do {
		desc = &ring->desc[ring->next_to_clean];
		desc_cb = &ring->desc_cb[ring->next_to_clean];
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure HW write desc complete */
		dma_rmb();
		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
			return -ENXIO;

		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
			new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
			if (unlikely(!new_skb)) {
				hns3_rl_err(ring_to_netdev(ring),
					    "alloc rx fraglist skb fail\n");
				return -ENXIO;
			}

			if (ring->page_pool)
				skb_mark_for_recycle(new_skb);

			ring->frag_num = 0;

			if (ring->tail_skb) {
				ring->tail_skb->next = new_skb;
				ring->tail_skb = new_skb;
			} else {
				skb_shinfo(skb)->frag_list = new_skb;
				ring->tail_skb = new_skb;
			}
		}

		if (ring->tail_skb) {
			head_skb->truesize += hns3_buf_size(ring);
			head_skb->data_len += le16_to_cpu(desc->rx.size);
			head_skb->len += le16_to_cpu(desc->rx.size);
			skb = ring->tail_skb;
		}

		dma_sync_single_for_cpu(ring_to_dev(ring),
					desc_cb->dma + desc_cb->page_offset,
					hns3_buf_size(ring),
					DMA_FROM_DEVICE);

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		trace_hns3_rx_desc(ring);
		hns3_rx_ring_move_fw(ring);
		ring->pending_buf++;
	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));

	return 0;
}

static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info, u32 ol_info, u16 csum)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u32 l3_type;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	/* if there is no HW GRO, do not set gro params */
	if (!skb_shinfo(skb)->gso_size) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
				 csum);
		return 0;
	}

	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
						  HNS3_RXD_GRO_COUNT_M,
						  HNS3_RXD_GRO_COUNT_S);

	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					    HNS3_RXD_PTYPE_S);

		l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
	} else {
		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					  HNS3_RXD_L3ID_S);
	}

	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return -EFAULT;

	return hns3_gro_complete(skb, l234info);
}

static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 rss_hash,
				     u32 l234info, u32 ol_info)
{
	enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					    HNS3_RXD_PTYPE_S);

		rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
	} else {
		int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					      HNS3_RXD_L3ID_S);
		int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
					      HNS3_RXD_L4ID_S);

		if (l3_type == HNS3_L3_TYPE_IPV4 ||
		    l3_type == HNS3_L3_TYPE_IPV6) {
			if (l4_type == HNS3_L4_TYPE_UDP ||
			    l4_type == HNS3_L4_TYPE_TCP ||
			    l4_type == HNS3_L4_TYPE_SCTP)
				rss_type = PKT_HASH_TYPE_L4;
			else if (l4_type == HNS3_L4_TYPE_IGMP ||
				 l4_type == HNS3_L4_TYPE_ICMP)
				rss_type = PKT_HASH_TYPE_L3;
		}
	}

	skb_set_hash(skb, rss_hash, rss_type);
}

static void hns3_handle_rx_ts_info(struct net_device *netdev,
				   struct hns3_desc *desc, struct sk_buff *skb,
				   u32 bd_base_info)
{
	if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
		struct hnae3_handle *h = hns3_get_handle(netdev);
		u32 nsec = le32_to_cpu(desc->ts_nsec);
		u32 sec = le32_to_cpu(desc->ts_sec);

		if (h->ae_algo->ops->get_rx_hwts)
			h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
	}
}

static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
				    struct hns3_desc *desc, struct sk_buff *skb,
				    u32 l234info)
{
	struct net_device *netdev = ring_to_netdev(ring);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}
}

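/* Parse the metadata of the completed packet from its last BD: hardware
 * timestamp, stripped VLAN tag, error bits, GRO/checksum state and RSS
 * hash, and update the Rx statistics accordingly.
 */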
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	struct net_device *netdev = ring_to_netdev(ring);
	enum hns3_pkt_l2t_type l2_frame_type;
	u32 bd_base_info, l234info, ol_info;
	struct hns3_desc *desc;
	unsigned int len;
	int pre_ntc, ret;
	u16 csum;

	/* The bdinfo handled below is only valid on the last BD of the
	 * current packet, and ring->next_to_clean already points to the
	 * first descriptor of the next packet, hence the - 1 below.
	 */
	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
			(ring->desc_num - 1);
	desc = &ring->desc[pre_ntc];
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);
	ol_info = le32_to_cpu(desc->rx.ol_info);
	csum = le16_to_cpu(desc->csum);

	hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);

	hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		return -EFAULT;
	}

	len = skb->len;

	/* set the protocol before handing the packet to the IP stack */
	skb->protocol = eth_type_trans(skb, netdev);

	/* This is needed in order to enable forwarding support */
	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
					bd_base_info, ol_info, csum);
	if (unlikely(ret)) {
		hns3_ring_stats_update(ring, rx_err_cnt);
		return ret;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += len;

	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += len;

	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
				 l234info, ol_info);
	return 0;
}

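/* Receive one packet: start a new skb on the first valid BD, gather any
 * remaining BDs via hns3_add_frag(), then finish the packet with
 * hns3_handle_bdinfo(). Returns -ENXIO when hardware has not yet completed
 * the descriptor.
 */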
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	unsigned int length;
	u32 bd_base_info;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	if (!skb) {
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* Check valid BD */
		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
			return -ENXIO;

		dma_rmb();
		length = le16_to_cpu(desc->rx.size);

		ring->va = desc_cb->buf + desc_cb->page_offset;

		dma_sync_single_for_cpu(ring_to_dev(ring),
					desc_cb->dma + desc_cb->page_offset,
					hns3_buf_size(ring),
					DMA_FROM_DEVICE);

		/* Prefetch the first cache line of the first page.
		 * The idea is to cache a few bytes of the packet header.
		 * With a 64B L1 cache line we need to prefetch twice to
		 * cover 128B; on CPUs with 128B L1 cache lines a single
		 * fetch suffices to cache the relevant part of the header.
		 */
		net_prefetch(ring->va);

		ret = hns3_alloc_skb(ring, length, ring->va);
		skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
			ret = hns3_add_frag(ring);
			if (ret)
				return ret;
		}
	} else {
		ret = hns3_add_frag(ring);
		if (ret)
			return ret;
	}

	/* As the head data may be changed when GRO is enabled, copy
	 * the head data in after the rest of the packet has been received.
	 */
	if (skb->len > HNS3_RX_HEAD_SIZE)
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));

	ret = hns3_handle_bdinfo(ring, skb);
	if (unlikely(ret)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	skb_record_rx_queue(skb, ring->tqp->tqp_index);
	return 0;
}

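/* NAPI Rx poll loop: receive up to @budget packets, handing each completed
 * skb to @rx_fn and refilling Rx buffers in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE. Returns @budget (so polling continues) when
 * any buffer refill failed, otherwise the number of packets received.
 */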
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE	16
	int unused_count = hns3_desc_unused(ring);
	bool failure = false;
	int recv_pkts = 0;
	int err;

	unused_count -= ring->pending_buf;

	while (recv_pkts < budget) {
		/* Reuse or realloc buffers */
		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			failure = failure ||
				hns3_nic_alloc_rx_buffers(ring, unused_count);
			unused_count = 0;
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring);
		/* no FE for the packet yet, or skb allocation failed */
		if (unlikely(!ring->skb || err == -ENXIO)) {
			goto out;
		} else if (likely(!err)) {
			rx_fn(ring, ring->skb);
			recv_pkts++;
		}

		unused_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

out:
	/* sync the head pointer before exiting, since hardware calculates
	 * the FBD number from the head pointer
	 */
	if (unused_count > 0)
		failure = failure ||
			hns3_nic_alloc_rx_buffers(ring, unused_count);

	return failure ? budget : recv_pkts;
}

static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct dim_sample sample = {};

	if (!rx_group->coal.adapt_enable)
		return;

	dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
			  rx_group->total_bytes, &sample);
	net_dim(&rx_group->dim, &sample);
}

static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	struct dim_sample sample = {};

	if (!tx_group->coal.adapt_enable)
		return;

	dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
			  tx_group->total_bytes, &sample);
	net_dim(&tx_group->dim, &sample);
}

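/* Shared NAPI handler for a TQP vector: clean all Tx rings with the full
 * budget, split the Rx budget across the vector's Rx rings, then update
 * DIM interrupt coalescing and re-enable the vector interrupt once all
 * work is done.
 */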
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget = budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring, budget);

	/* make sure the rx ring budget is not smaller than 1 */
	if (tqp_vector->num_tqps > 1)
		rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);
		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_rx_int_coalesce(tqp_vector);
		hns3_update_tx_int_coalesce(tqp_vector);

		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}

static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				  struct hnae3_ring_chain_node **head,
				  bool is_tx)
{
	u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
	u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
	struct hnae3_ring_chain_node *cur_chain = *head;
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *ring;

	ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;

	if (cur_chain) {
		while (cur_chain->next)
			cur_chain = cur_chain->next;
	}

	while (ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;
		if (cur_chain)
			cur_chain->next = chain;
		else
			*head = chain;
		chain->tqp_index = ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      bit_value);
		hnae3_set_field(chain->int_gl_idx,
				HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, field_value);

		cur_chain = chain;

		ring = ring->next;
	}

	return 0;
}

static struct hnae3_ring_chain_node *
hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = NULL;
	struct hnae3_ring_chain_node *chain;

	if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
		goto err_free_chain;

	if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
		goto err_free_chain;

	return cur_chain;

err_free_chain:
	while (cur_chain) {
		chain = cur_chain->next;
		devm_kfree(&pdev->dev, cur_chain);
		cur_chain = chain;
	}

	return NULL;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;

	group->count++;
}

static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}

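/* Deferred DIM callbacks: apply the Rx/Tx moderation profile chosen by the
 * net_dim algorithm to the vector's GL (gap) and QL (quantity) coalescing
 * settings, then re-arm DIM for the next measurement window.
 */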
static void hns3_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct hns3_enet_ring_group *group = container_of(dim,
		struct hns3_enet_ring_group, dim);
	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
	tqp_vector->rx_group.coal.int_gl = cur_moder.usec;

	if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
		hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
		tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
	}

	dim->state = DIM_START_MEASURE;
}

static void hns3_tx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct hns3_enet_ring_group *group = container_of(dim,
		struct hns3_enet_ring_group, dim);
	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
	struct dim_cq_moder cur_moder =
		net_dim_get_tx_moderation(dim->mode, dim->profile_ix);

	hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
	tqp_vector->tx_group.coal.int_gl = cur_moder.usec;

	if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
		hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
		tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
	}

	dim->state = DIM_START_MEASURE;
}

static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
{
	INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
	INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
}

static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_coalesce_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
		hns3_nic_init_dim(tqp_vector);
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       &priv->ring[i]);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       &priv->ring[i + tqp_num]);

		priv->ring[i].tqp_vector = tqp_vector;
		priv->ring[i + tqp_num].tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		struct hnae3_ring_chain_node *vector_ring_chain;

		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
		if (!vector_ring_chain) {
			ret = -ENOMEM;
			goto map_ring_fail;
		}

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}

static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
	}
}

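/* Allocate the TQP vectors for this handle: request at most one vector per
 * online CPU (capped by the queue count), record the number the AE layer
 * actually grants, and initialize the per-vector coalescing state.
 */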
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	/* save the actual available vector number */
	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_coalesce_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node *vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
			continue;

		/* Since the mapping can be overwritten, if we fail to get
		 * the chain between a vector and its rings we should still
		 * go on to deal with the remaining vectors.
		 */
		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
		if (!vector_ring_chain)
			dev_warn(priv->dev, "failed to get ring chain\n");

		h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);

		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}
}

static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
}

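/* On DEVICE_VERSION_V3 and later hardware behind an IOMMU in DMA
 * translation mode, raise the minimum tx_copybreak and Tx spare buffer
 * size, presumably to amortize the cost of per-packet IOMMU mapping by
 * routing more traffic through the pre-mapped bounce buffer.
 */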
static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
{
#define HNS3_MIN_SPARE_BUF_SIZE	(2 * 1024 * 1024)
#define HNS3_MAX_PACKET_SIZE	(64 * 1024)

	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hnae3_handle *handle = priv->ae_handle;

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return;

	if (!(domain && iommu_is_dma_domain(domain)))
		return;

	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;

	if (priv->tx_copybreak < priv->min_tx_copybreak)
		priv->tx_copybreak = priv->min_tx_copybreak;
	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	int desc_num;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring = &priv->ring[q->tqp_index];
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring->queue_index = q->tqp_index;
		ring->tx_copybreak = priv->tx_copybreak;
		ring->last_to_use = 0;
	} else {
		ring = &priv->ring[q->tqp_index + queue_num];
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring->queue_index = q->tqp_index;
		ring->rx_copybreak = priv->rx_copybreak;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns3_queue_to_ring(struct hnae3_queue *tqp,
			       struct hns3_nic_priv *priv)
{
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i;

	priv->ring = devm_kzalloc(&pdev->dev,
				  array3_size(h->kinfo.num_tqps,
					      sizeof(*priv->ring), 2),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_queue_to_ring(h->kinfo.tqp[i], priv);

	return 0;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	if (!priv->ring)
		return;

	devm_kfree(priv->dev, priv->ring);
	priv->ring = NULL;
}

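/* Create a page pool for an Rx ring, sized so that the pool holds enough
 * pages to back every descriptor's buffer. Failure is not fatal: the ring
 * falls back to the plain page-allocation path when ring->page_pool stays
 * NULL.
 */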
static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
{
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = hns3_page_order(ring),
		.pool_size = ring->desc_num * hns3_buf_size(ring) /
				(PAGE_SIZE << hns3_page_order(ring)),
		.nid = dev_to_node(ring_to_dev(ring)),
		.dev = ring_to_dev(ring),
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = PAGE_SIZE << hns3_page_order(ring),
	};

	ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(ring->page_pool)) {
		dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
			 PTR_ERR(ring->page_pool));
		ring->page_pool = NULL;
	}
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		if (page_pool_enabled)
			hns3_alloc_page_pool(ring);

		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	} else {
		hns3_init_tx_spare_buffer(ring);
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->last_to_use = 0;
	ring->pending_buf = 0;
	if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	} else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
		struct hns3_tx_spare *tx_spare = ring->tx_spare;

		dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
			       DMA_TO_DEVICE);
		free_pages((unsigned long)tx_spare->buf,
			   get_order(tx_spare->len));
		devm_kfree(ring_to_dev(ring), tx_spare);
		ring->tx_spare = NULL;
	}

	if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
		page_pool_destroy(ring->page_pool);
		ring->page_pool = NULL;
	}
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

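/* Program the ring's DMA base address, buffer-size type and BD count into
 * the hardware queue registers; the high address word is shifted in two
 * steps ((dma >> 31) >> 1) to avoid an undefined 32-bit shift when
 * dma_addr_t is 32 bits wide.
 */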
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

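/* Bind each TX queue to its traffic class by writing the TC index to
 * every queue in the per-TC queue ranges.
 */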
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	int i;

	for (i = 0; i < tc_info->num_tc; i++) {
		int j;

		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q;

			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
		}
	}
}

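/* Allocate memory for all TX and RX rings; the ring array holds all
 * TX rings first, followed by the RX rings. On failure the rings that
 * were already set up are torn down again.
 */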
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	hns3_update_tx_spare_buf_config(priv);
	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring[i].syncp);
		cond_resched();
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(&priv->ring[j]);

	return -ENOMEM;
}

static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(&priv->ring[i]);
		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
	}
}

/* Set the MAC address if it is configured, otherwise leave it to the
 * AE driver.
 */
static int hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN] = {0};
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr)
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);

	/* Check if the MAC address is valid; if not, get a random one */
	if (!is_valid_ether_addr(mac_addr_temp)) {
		eth_hw_addr_random(netdev);
		hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
		dev_warn(priv->dev, "using random MAC address %s\n",
			 format_mac_addr);
	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
		eth_hw_addr_set(netdev, mac_addr_temp);
		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
	} else {
		return 0;
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}

static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}

static void hns3_info_show(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
	dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
	dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
	dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
	dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
		 kinfo->tc_info.num_tc);
	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}

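/* Apply a DIM CQ period mode (start counting from EQE or from CQE) to
 * the TX or RX side: record it, propagate it to every vector, and
 * program the mode register when the hardware supports it.
 */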
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
				    enum dim_cq_period_mode mode, bool is_tx)
{
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hnae3_handle *handle = priv->ae_handle;
	int i;

	if (is_tx) {
		priv->tx_cqe_mode = mode;

		for (i = 0; i < priv->vector_num; i++)
			priv->tqp_vector[i].tx_group.dim.mode = mode;
	} else {
		priv->rx_cqe_mode = mode;

		for (i = 0; i < priv->vector_num; i++)
			priv->tqp_vector[i].rx_group.dim.mode = mode;
	}

	if (hnae3_ae_dev_cq_supported(ae_dev)) {
		u32 new_mode;
		u64 reg;

		new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
			HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
		reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;

		writel(new_mode, handle->kinfo.io_base + reg);
	}
}

void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
			      enum dim_cq_period_mode tx_mode,
			      enum dim_cq_period_mode rx_mode)
{
	hns3_set_cq_period_mode(priv, tx_mode, true);
	hns3_set_cq_period_mode(priv, rx_mode, false);
}

static void hns3_state_init(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);

	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);

	if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}

static void hns3_state_uninit(struct hnae3_handle *handle)
{
	struct hns3_nic_priv *priv = handle->priv;

	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
}

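/* Bring up a client instance: allocate and initialize the netdev, set
 * up rings, vectors, IRQs, PHY and debugfs, then register the netdev.
 * The error path unwinds each step in reverse order.
 */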
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	ae_dev->handle = handle;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	priv->min_tx_copybreak = 0;
	priv->min_tx_spare_buf_size = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	hns3_nic_init_coal_cfg(priv);

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring;
	}

	hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				 DIM_CQ_PERIOD_MODE_START_FROM_EQE);

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto out_init_irq_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	ret = hns3_dbg_init(handle);
	if (ret) {
		dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
			ret);
		goto out_client_start;
	}

	netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);

	hns3_state_init(handle);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	if (netif_msg_drv(handle))
		hns3_info_show(priv);

	return ret;

out_reg_netdev_fail:
	hns3_state_uninit(handle);
	hns3_dbg_uninit(handle);
	hns3_client_stop(handle);
out_client_start:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
out_init_irq_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

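/* Tear down a client instance: unregister the netdev, stop the
 * client, release IRQs, rings and vectors, then free the netdev.
 */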
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_client_stop(handle);

	hns3_uninit_phy(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_free_rx_cpu_rmap(netdev);

	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(handle, true);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);

	hns3_put_ring_config(priv);

out_netdev_free:
	hns3_dbg_uninit(handle);
	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link down\n");
	}
}

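/* Walk the TX ring from next_to_clean to next_to_use, detaching and
 * freeing every buffer still pending transmission.
 */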
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
		ring_ptr_move_fw(ring, next_to_clean);
	}

	ring->pending_buf = 0;
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				hns3_ring_stats_update(ring, sw_err_cnt);
				/* if allocating a new buffer fails, exit
				 * directly and re-clear in the up flow.
				 */
				netdev_warn(ring_to_netdev(ring),
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct hns3_enet_ring *ring;

		ring = &priv->ring[i];
		hns3_clear_tx_ring(ring);

		ring = &priv->ring[i + h->kinfo.num_tqps];
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}

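/* Reset all queues in hardware, reprogram the ring registers, clear
 * stale TX buffers and refill the RX rings so that every RX
 * descriptor is owned by hardware again.
 */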
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	ret = h->ae_algo->ops->reset_queue(h);
	if (ret)
		return ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_init_ring_hw(&priv->ring[i]);

		/* We need to clear the tx ring here because the self test
		 * will use the ring and will not run down before up.
		 */
		hns3_clear_tx_ring(&priv->ring[i]);
		priv->ring[i].next_to_clean = 0;
		priv->ring[i].next_to_use = 0;
		priv->ring[i].last_to_use = 0;

		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all
		 * descriptors.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_err(kinfo->netdev, "device is not initialized yet\n");
		return -EFAULT;
	}

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}

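/* Rebuild ring, vector and IRQ resources after a reset; this mirrors
 * the resource setup in hns3_client_init but reuses the existing
 * netdev.
 */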
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto err_init_irq_fail;
	}

	if (!hns3_is_phys_func(handle->pdev))
		hns3_init_mac_addr(netdev);

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_client_start_fail;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_client_start_fail:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
err_init_irq_fail:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		hns3_nic_net_stop(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);

	hns3_put_ring_config(priv);

	return 0;
}

int hns3_reset_notify(struct hnae3_handle *handle,
		      enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
				bool rxfh_configured)
{
	int ret;

	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
						 rxfh_configured);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Change tqp num(%u) fail.\n", new_tqp_num);
		return ret;
	}

	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
	if (ret)
		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);

	return ret;
}

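/* Validate and apply a new combined channel count: bring the client
 * down, rebuild it with the new queue number, and revert to the old
 * value if the change fails.
 */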
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (kinfo->tc_info.mqprio_active) {
		dev_err(&netdev->dev,
			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
		return -EINVAL;
	}

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %u",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	netif_dbg(h, drv, netdev,
		  "set channels: tqp_num=%u, rxfh=%d\n",
		  new_tqp_num, rxfh_configured);

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		int ret1;

		netdev_warn(netdev,
			    "Change channels fail, revert to old value\n");
		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
		if (ret1) {
			netdev_err(netdev,
				   "revert to old channel fail\n");
			return ret1;
		}

		return ret;
	}

	return 0;
}

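/* Quiesce the netdev before an external loopback test: stop TX,
 * disable IRQs and TQPs, and reset the rings unless a reset is
 * already in progress.
 */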
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (!if_running)
		return;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	netif_carrier_off(ndev);
	netif_tx_disable(ndev);

	hns3_disable_irqs_and_tqps(ndev);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during the reset process, because the driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(ndev))
		hns3_nic_reset_all_ring(priv->ae_handle);

	hns3_reset_tx_queue(priv->ae_handle);
}

void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!if_running)
		return;

	if (hns3_nic_resetting(ndev))
		return;

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	if (hns3_nic_reset_all_ring(priv->ae_handle))
		return;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	hns3_enable_irqs_and_tqps(ndev);

	netif_tx_wake_all_queues(ndev);

	if (h->ae_algo->ops->get_status(h))
		netif_carrier_on(ndev);
}

static const struct hns3_hw_error_info hns3_hw_err[] = {
	{ .type = HNAE3_PPU_POISON_ERROR,
	  .msg = "PPU poison" },
	{ .type = HNAE3_CMDQ_ECC_ERROR,
	  .msg = "IMP CMDQ error" },
	{ .type = HNAE3_IMP_RD_POISON_ERROR,
	  .msg = "IMP RD poison" },
	{ .type = HNAE3_ROCEE_AXI_RESP_ERROR,
	  .msg = "ROCEE AXI RESP error" },
};

static void hns3_process_hw_error(struct hnae3_handle *handle,
				  enum hnae3_hw_error_type type)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
		if (hns3_hw_err[i].type == type) {
			dev_err(&handle->pdev->dev, "Detected %s!\n",
				hns3_hw_err[i].msg);
			break;
		}
	}
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.reset_notify = hns3_reset_notify,
	.process_hw_error = hns3_process_hw_error,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	hnae3_acquire_unload_lock();
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
	hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");