1 // SPDX-License-Identifier: GPL-2.0
2 /* Huawei HiNIC PCI Express Linux driver
3 * Copyright(c) 2017 Huawei Technologies Co., Ltd
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 */
15
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/etherdevice.h>
24 #include <linux/netdevice.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ethtool.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sfp.h>
29
30 #include "hinic_hw_qp.h"
31 #include "hinic_hw_dev.h"
32 #include "hinic_port.h"
33 #include "hinic_tx.h"
34 #include "hinic_rx.h"
35 #include "hinic_dev.h"
36
37 #define SET_LINK_STR_MAX_LEN 16
38
39 #define GET_SUPPORTED_MODE 0
40 #define GET_ADVERTISED_MODE 1
41
42 #define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
43 ((ecmd)->supported |= \
44 (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
45 #define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
46 ((ecmd)->advertising |= \
47 (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
48 #define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
49 ((ecmd)->supported |= SUPPORTED_##mode)
50 #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
51 ((ecmd)->advertising |= ADVERTISED_##mode)
52
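/* Interrupt coalescing is programmed in hardware units: the timer in ticks
 * of 9 us and the pending limit in steps of 8 frames. Both fields are u8,
 * so the largest configurable values are 255 * unit. For example (assuming
 * an interface named eth0), "ethtool -C eth0 rx-usecs 18 rx-frames 16"
 * would program a timer count of 2 and a pending limit of 2.
 */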
53 #define COALESCE_PENDING_LIMIT_UNIT 8
54 #define COALESCE_TIMER_CFG_UNIT 9
55 #define COALESCE_ALL_QUEUE 0xFFFF
56 #define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
57 #define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
58
59 struct hw2ethtool_link_mode {
60 enum ethtool_link_mode_bit_indices link_mode_bit;
61 u32 speed;
62 enum hinic_link_mode hw_link_mode;
63 };
64
65 struct cmd_link_settings {
66 u64 supported;
67 u64 advertising;
68
69 u32 speed;
70 u8 duplex;
71 u8 port;
72 u8 autoneg;
73 };
74
75 static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
76 SPEED_10, SPEED_100,
77 SPEED_1000, SPEED_10000,
78 SPEED_25000, SPEED_40000,
79 SPEED_100000
80 };
81
82 static struct hw2ethtool_link_mode
83 hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
84 {
85 .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
86 .speed = SPEED_10000,
87 .hw_link_mode = HINIC_10GE_BASE_KR,
88 },
89 {
90 .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
91 .speed = SPEED_40000,
92 .hw_link_mode = HINIC_40GE_BASE_KR4,
93 },
94 {
95 .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
96 .speed = SPEED_40000,
97 .hw_link_mode = HINIC_40GE_BASE_CR4,
98 },
99 {
100 .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
101 .speed = SPEED_100000,
102 .hw_link_mode = HINIC_100GE_BASE_KR4,
103 },
104 {
105 .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
106 .speed = SPEED_100000,
107 .hw_link_mode = HINIC_100GE_BASE_CR4,
108 },
109 {
110 .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
111 .speed = SPEED_25000,
112 .hw_link_mode = HINIC_25GE_BASE_KR_S,
113 },
114 {
115 .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
116 .speed = SPEED_25000,
117 .hw_link_mode = HINIC_25GE_BASE_CR_S,
118 },
119 {
120 .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
121 .speed = SPEED_25000,
122 .hw_link_mode = HINIC_25GE_BASE_KR,
123 },
124 {
125 .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
126 .speed = SPEED_25000,
127 .hw_link_mode = HINIC_25GE_BASE_CR,
128 },
129 {
130 .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
131 .speed = SPEED_1000,
132 .hw_link_mode = HINIC_GE_BASE_KX,
133 },
134 };
135
136 #define LP_DEFAULT_TIME 5 /* seconds */
137 #define LP_PKT_LEN 1514
138
139 #define PORT_DOWN_ERR_IDX 0
140 enum diag_test_index {
141 INTERNAL_LP_TEST = 0,
142 EXTERNAL_LP_TEST = 1,
143 DIAG_TEST_MAX = 2,
144 };
145
146 static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
147 enum hinic_speed speed)
148 {
149 switch (speed) {
150 case HINIC_SPEED_10MB_LINK:
151 link_ksettings->base.speed = SPEED_10;
152 break;
153
154 case HINIC_SPEED_100MB_LINK:
155 link_ksettings->base.speed = SPEED_100;
156 break;
157
158 case HINIC_SPEED_1000MB_LINK:
159 link_ksettings->base.speed = SPEED_1000;
160 break;
161
162 case HINIC_SPEED_10GB_LINK:
163 link_ksettings->base.speed = SPEED_10000;
164 break;
165
166 case HINIC_SPEED_25GB_LINK:
167 link_ksettings->base.speed = SPEED_25000;
168 break;
169
170 case HINIC_SPEED_40GB_LINK:
171 link_ksettings->base.speed = SPEED_40000;
172 break;
173
174 case HINIC_SPEED_100GB_LINK:
175 link_ksettings->base.speed = SPEED_100000;
176 break;
177
178 default:
179 link_ksettings->base.speed = SPEED_UNKNOWN;
180 break;
181 }
182 }
183
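/* Return the index of @link_mode in hw_to_ethtool_link_mode_table, or
 * HINIC_LINK_MODE_NUMBERS when the mode is not in the table; callers treat
 * an out-of-range index as an unknown mode and skip it.
 */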
184 static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
185 {
186 int i = 0;
187
188 for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
189 if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
190 break;
191 }
192
193 return i;
194 }
195
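/* @hw_link_mode is a bitmap of hinic_link_mode bits. For every bit that is
 * set, add the matching ethtool speed link mode to either the supported or
 * the advertised mask, selected by @name.
 */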
196 static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
197 enum hinic_link_mode hw_link_mode,
198 u32 name)
199 {
200 enum hinic_link_mode link_mode;
201 int idx = 0;
202
203 for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
204 if (hw_link_mode & ((u32)1 << link_mode)) {
205 idx = hinic_get_link_mode_index(link_mode);
206 if (idx >= HINIC_LINK_MODE_NUMBERS)
207 continue;
208
209 if (name == GET_SUPPORTED_MODE)
210 ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
211 (link_settings, idx);
212 else
213 ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
214 (link_settings, idx);
215 }
216 }
217 }
218
219 static void hinic_link_port_type(struct cmd_link_settings *link_settings,
220 enum hinic_port_type port_type)
221 {
222 switch (port_type) {
223 case HINIC_PORT_ELEC:
224 case HINIC_PORT_TP:
225 ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
226 ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
227 link_settings->port = PORT_TP;
228 break;
229
230 case HINIC_PORT_AOC:
231 case HINIC_PORT_FIBRE:
232 ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
233 ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
234 link_settings->port = PORT_FIBRE;
235 break;
236
237 case HINIC_PORT_COPPER:
238 ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
239 ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
240 link_settings->port = PORT_DA;
241 break;
242
243 case HINIC_PORT_BACKPLANE:
244 ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
245 ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
246 link_settings->port = PORT_NONE;
247 break;
248
249 default:
250 link_settings->port = PORT_OTHER;
251 break;
252 }
253 }
254
255 static int hinic_get_link_ksettings(struct net_device *netdev,
256 struct ethtool_link_ksettings
257 *link_ksettings)
258 {
259 struct hinic_dev *nic_dev = netdev_priv(netdev);
260 struct hinic_link_mode_cmd link_mode = { 0 };
261 struct hinic_pause_config pause_info = { 0 };
262 struct cmd_link_settings settings = { 0 };
263 enum hinic_port_link_state link_state;
264 struct hinic_port_cap port_cap;
265 int err;
266
267 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
268 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
269
270 link_ksettings->base.speed = SPEED_UNKNOWN;
271 link_ksettings->base.autoneg = AUTONEG_DISABLE;
272 link_ksettings->base.duplex = DUPLEX_UNKNOWN;
273
274 err = hinic_port_get_cap(nic_dev, &port_cap);
275 if (err)
276 return err;
277
278 hinic_link_port_type(&settings, port_cap.port_type);
279 link_ksettings->base.port = settings.port;
280
281 err = hinic_port_link_state(nic_dev, &link_state);
282 if (err)
283 return err;
284
285 if (link_state == HINIC_LINK_STATE_UP) {
286 set_link_speed(link_ksettings, port_cap.speed);
287 link_ksettings->base.duplex =
288 (port_cap.duplex == HINIC_DUPLEX_FULL) ?
289 DUPLEX_FULL : DUPLEX_HALF;
290 }
291
292 if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
293 ethtool_link_ksettings_add_link_mode(link_ksettings,
294 advertising, Autoneg);
295
296 if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
297 link_ksettings->base.autoneg = AUTONEG_ENABLE;
298
299 err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
300 if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
301 link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
302 return -EIO;
303
304 hinic_add_ethtool_link_mode(&settings, link_mode.supported,
305 GET_SUPPORTED_MODE);
306 hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
307 GET_ADVERTISED_MODE);
308
309 if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
310 err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
311 if (err)
312 return err;
313 ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
314 if (pause_info.rx_pause && pause_info.tx_pause) {
315 ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
316 } else if (pause_info.tx_pause) {
317 ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
318 } else if (pause_info.rx_pause) {
319 ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
320 ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
321 }
322 }
323
324 linkmode_copy(link_ksettings->link_modes.supported,
325 (unsigned long *)&settings.supported);
326 linkmode_copy(link_ksettings->link_modes.advertising,
327 (unsigned long *)&settings.advertising);
328
329 return 0;
330 }
331
332 static int hinic_ethtool_to_hw_speed_level(u32 speed)
333 {
334 int i;
335
336 for (i = 0; i < LINK_SPEED_LEVELS; i++) {
337 if (hw_to_ethtool_speed[i] == speed)
338 break;
339 }
340
341 return i;
342 }
343
344 static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
345 u32 speed)
346 {
347 enum hinic_link_mode link_mode;
348 int idx;
349
350 for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
351 if (!(supported_link & ((u32)1 << link_mode)))
352 continue;
353
354 idx = hinic_get_link_mode_index(link_mode);
355 if (idx >= HINIC_LINK_MODE_NUMBERS)
356 continue;
357
358 if (hw_to_ethtool_link_mode_table[idx].speed == speed)
359 return true;
360 }
361
362 return false;
363 }
364
365 static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
366 {
367 struct hinic_link_mode_cmd link_mode = { 0 };
368 struct net_device *netdev = nic_dev->netdev;
369 enum nic_speed_level speed_level = 0;
370 int err;
371
372 err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
373 if (err)
374 return false;
375
376 if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
377 link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
378 return false;
379
380 speed_level = hinic_ethtool_to_hw_speed_level(speed);
381 if (speed_level >= LINK_SPEED_LEVELS ||
382 !hinic_is_support_speed(link_mode.supported, speed)) {
383 netif_err(nic_dev, drv, netdev,
384 "Unsupported speed: %d\n", speed);
385 return false;
386 }
387
388 return true;
389 }
390
391 static int get_link_settings_type(struct hinic_dev *nic_dev,
392 u8 autoneg, u32 speed, u32 *set_settings)
393 {
394 struct hinic_port_cap port_cap = { 0 };
395 int err;
396
397 err = hinic_port_get_cap(nic_dev, &port_cap);
398 if (err)
399 return err;
400
401 /* set the autoneg bit whenever the port supports autonegotiation */
402 if (port_cap.autoneg_cap)
403 *set_settings |= HILINK_LINK_SET_AUTONEG;
404
405 if (autoneg == AUTONEG_ENABLE) {
406 if (!port_cap.autoneg_cap) {
407 netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n");
408 return -EOPNOTSUPP;
409 }
410 } else if (speed != (u32)SPEED_UNKNOWN) {
411 /* set speed only when autoneg is disabled */
412 if (!hinic_is_speed_legal(nic_dev, speed))
413 return -EINVAL;
414 *set_settings |= HILINK_LINK_SET_SPEED;
415 } else {
416 netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
417 return -EOPNOTSUPP;
418 }
419
420 return 0;
421 }
422
423 static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
424 u32 set_settings, u8 autoneg,
425 u32 speed)
426 {
427 enum nic_speed_level speed_level = 0;
428 int err = 0;
429
430 if (set_settings & HILINK_LINK_SET_AUTONEG) {
431 err = hinic_set_autoneg(nic_dev->hwdev,
432 (autoneg == AUTONEG_ENABLE));
433 if (err)
434 netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
435 (autoneg == AUTONEG_ENABLE) ?
436 "Enable" : "Disable");
437 else
438 netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
439 (autoneg == AUTONEG_ENABLE) ?
440 "Enable" : "Disable");
441 }
442
443 if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
444 speed_level = hinic_ethtool_to_hw_speed_level(speed);
445 err = hinic_set_speed(nic_dev->hwdev, speed_level);
446 if (err)
447 netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
448 speed);
449 else
450 netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
451 speed);
452 }
453
454 return err;
455 }
456
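/* Try the combined link-settings management command first; only if the
 * firmware reports it as unsupported, fall back to the separate autoneg
 * and speed commands.
 */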
457 static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
458 u32 set_settings, u8 autoneg, u32 speed)
459 {
460 struct hinic_link_ksettings_info settings = {0};
461 char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
462 const char *autoneg_str;
463 struct net_device *netdev = nic_dev->netdev;
464 enum nic_speed_level speed_level = 0;
465 int err;
466
467 autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
468 (autoneg ? "autong enable " : "autong disable ") : "";
469
470 if (set_settings & HILINK_LINK_SET_SPEED) {
471 speed_level = hinic_ethtool_to_hw_speed_level(speed);
472 err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
473 "speed %d ", speed);
474 if (err >= SET_LINK_STR_MAX_LEN) {
475 netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, return %d with buffer size %d\n",
476 err, SET_LINK_STR_MAX_LEN);
477 return -EFAULT;
478 }
479 }
480
481 settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
482 settings.valid_bitmap = set_settings;
483 settings.autoneg = autoneg;
484 settings.speed = speed_level;
485
486 err = hinic_set_link_settings(nic_dev->hwdev, &settings);
487 if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
488 if (err)
489 netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
490 autoneg_str, set_link_str);
491 else
492 netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
493 autoneg_str, set_link_str);
494
495 return err;
496 }
497
498 return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
499 speed);
500 }
501
502 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
503 {
504 struct hinic_dev *nic_dev = netdev_priv(netdev);
505 u32 set_settings = 0;
506 int err;
507
508 err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
509 if (err)
510 return err;
511
512 if (set_settings)
513 err = hinic_set_settings_to_hw(nic_dev, set_settings,
514 autoneg, speed);
515 else
516 netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
517
518 return err;
519 }
520
521 static int hinic_set_link_ksettings(struct net_device *netdev, const struct
522 ethtool_link_ksettings *link_settings)
523 {
524 /* only support to set autoneg and speed */
525 return set_link_settings(netdev, link_settings->base.autoneg,
526 link_settings->base.speed);
527 }
528
529 static void hinic_get_drvinfo(struct net_device *netdev,
530 struct ethtool_drvinfo *info)
531 {
532 struct hinic_dev *nic_dev = netdev_priv(netdev);
533 u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
534 struct hinic_hwdev *hwdev = nic_dev->hwdev;
535 struct hinic_hwif *hwif = hwdev->hwif;
536 int err;
537
538 strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
539 strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
540
541 err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
542 if (err)
543 return;
544
545 snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
546 }
547
548 static void hinic_get_ringparam(struct net_device *netdev,
549 struct ethtool_ringparam *ring,
550 struct kernel_ethtool_ringparam *kernel_ring,
551 struct netlink_ext_ack *extack)
552 {
553 struct hinic_dev *nic_dev = netdev_priv(netdev);
554
555 ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
556 ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
557 ring->rx_pending = nic_dev->rq_depth;
558 ring->tx_pending = nic_dev->sq_depth;
559 }
560
561 static int check_ringparam_valid(struct hinic_dev *nic_dev,
562 struct ethtool_ringparam *ring)
563 {
564 if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
565 netif_err(nic_dev, drv, nic_dev->netdev,
566 "Unsupported rx_jumbo_pending/rx_mini_pending\n");
567 return -EINVAL;
568 }
569
570 if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
571 ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
572 ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
573 ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
574 netif_err(nic_dev, drv, nic_dev->netdev,
575 "Queue depth out of range [%d-%d]\n",
576 HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
577 return -EINVAL;
578 }
579
580 return 0;
581 }
582
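/* New ring depths are rounded down to a power of two (e.g. a requested
 * depth of 1000 becomes 512), and changing the depth restarts the
 * interface if it is running.
 */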
583 static int hinic_set_ringparam(struct net_device *netdev,
584 struct ethtool_ringparam *ring,
585 struct kernel_ethtool_ringparam *kernel_ring,
586 struct netlink_ext_ack *extack)
587 {
588 struct hinic_dev *nic_dev = netdev_priv(netdev);
589 u16 new_sq_depth, new_rq_depth;
590 int err;
591
592 err = check_ringparam_valid(nic_dev, ring);
593 if (err)
594 return err;
595
596 new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
597 new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
598
599 if (new_sq_depth == nic_dev->sq_depth &&
600 new_rq_depth == nic_dev->rq_depth)
601 return 0;
602
603 netif_info(nic_dev, drv, netdev,
604 "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
605 nic_dev->sq_depth, nic_dev->rq_depth,
606 new_sq_depth, new_rq_depth);
607
608 nic_dev->sq_depth = new_sq_depth;
609 nic_dev->rq_depth = new_rq_depth;
610
611 if (netif_running(netdev)) {
612 netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
613 err = hinic_close(netdev);
614 if (err) {
615 netif_err(nic_dev, drv, netdev,
616 "Failed to close netdev\n");
617 return -EFAULT;
618 }
619
620 err = hinic_open(netdev);
621 if (err) {
622 netif_err(nic_dev, drv, netdev,
623 "Failed to open netdev\n");
624 return -EFAULT;
625 }
626 }
627
628 return 0;
629 }
630
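/* Report the coalesce parameters of one queue or, for COALESCE_ALL_QUEUE,
 * the tx/rx queue 0 parameters as the device-wide values.
 */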
631 static int __hinic_get_coalesce(struct net_device *netdev,
632 struct ethtool_coalesce *coal, u16 queue)
633 {
634 struct hinic_dev *nic_dev = netdev_priv(netdev);
635 struct hinic_intr_coal_info *rx_intr_coal_info;
636 struct hinic_intr_coal_info *tx_intr_coal_info;
637
638 if (queue == COALESCE_ALL_QUEUE) {
639 /* get tx/rx irq0 as default parameters */
640 rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
641 tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
642 } else {
643 if (queue >= nic_dev->num_qps) {
644 netif_err(nic_dev, drv, netdev,
645 "Invalid queue_id: %d\n", queue);
646 return -EINVAL;
647 }
648 rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
649 tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
650 }
651
652 /* coalesce_timer is in units of 9 us */
653 coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
654 COALESCE_TIMER_CFG_UNIT;
655 /* coalesced_frames is in units of 8 frames */
656 coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
657 COALESCE_PENDING_LIMIT_UNIT;
658 coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
659 COALESCE_TIMER_CFG_UNIT;
660 coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
661 COALESCE_PENDING_LIMIT_UNIT;
662
663 return 0;
664 }
665
666 static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
667 {
668 if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
669 coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
670 coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
671 coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
672 return -ERANGE;
673
674 return 0;
675 }
676
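/* Cache the new coalesce parameters in the driver and, if the interface is
 * up and the queue is in use, also program them into the queue's MSI-X
 * entry through the interrupt configuration command.
 */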
677 static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
678 struct hinic_intr_coal_info *coal,
679 bool set_rx_coal)
680 {
681 struct hinic_intr_coal_info *intr_coal = NULL;
682 struct hinic_msix_config interrupt_info = {0};
683 struct net_device *netdev = nic_dev->netdev;
684 u16 msix_idx;
685 int err;
686
687 intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
688 &nic_dev->tx_intr_coalesce[q_id];
689
690 intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
691 intr_coal->pending_limt = coal->pending_limt;
692
693 /* If the netdev is not running or the qp is not in use,
694 * there is no need to write the coalesce settings to hw.
695 */
696 if (!(nic_dev->flags & HINIC_INTF_UP) ||
697 q_id >= nic_dev->num_qps)
698 return 0;
699
700 msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
701 nic_dev->txqs[q_id].sq->msix_entry;
702 interrupt_info.msix_index = msix_idx;
703 interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
704 interrupt_info.pending_cnt = intr_coal->pending_limt;
705 interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
706
707 err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
708 if (err)
709 netif_warn(nic_dev, drv, netdev,
710 "Failed to set %s queue%d coalesce",
711 set_rx_coal ? "rx" : "tx", q_id);
712
713 return err;
714 }
715
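/* Apply @intr_coal to a single queue, or to every queue up to max_qps when
 * @queue is COALESCE_ALL_QUEUE.
 */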
716 static int __set_hw_coal_param(struct hinic_dev *nic_dev,
717 struct hinic_intr_coal_info *intr_coal,
718 u16 queue, bool set_rx_coal)
719 {
720 int err;
721 u16 i;
722
723 if (queue == COALESCE_ALL_QUEUE) {
724 for (i = 0; i < nic_dev->max_qps; i++) {
725 err = set_queue_coalesce(nic_dev, i, intr_coal,
726 set_rx_coal);
727 if (err)
728 return err;
729 }
730 } else {
731 if (queue >= nic_dev->num_qps) {
732 netif_err(nic_dev, drv, nic_dev->netdev,
733 "Invalid queue_id: %d\n", queue);
734 return -EINVAL;
735 }
736 err = set_queue_coalesce(nic_dev, queue, intr_coal,
737 set_rx_coal);
738 if (err)
739 return err;
740 }
741
742 return 0;
743 }
744
745 static int __hinic_set_coalesce(struct net_device *netdev,
746 struct ethtool_coalesce *coal, u16 queue)
747 {
748 struct hinic_dev *nic_dev = netdev_priv(netdev);
749 struct hinic_intr_coal_info rx_intr_coal = {0};
750 struct hinic_intr_coal_info tx_intr_coal = {0};
751 bool set_rx_coal = false;
752 bool set_tx_coal = false;
753 int err;
754
755 err = is_coalesce_exceed_limit(coal);
756 if (err)
757 return err;
758
759 if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
760 rx_intr_coal.coalesce_timer_cfg =
761 (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
762 rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
763 COALESCE_PENDING_LIMIT_UNIT);
764 set_rx_coal = true;
765 }
766
767 if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
768 tx_intr_coal.coalesce_timer_cfg =
769 (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
770 tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
771 COALESCE_PENDING_LIMIT_UNIT);
772 set_tx_coal = true;
773 }
774
775 /* setting coalesce timer or pending limit to zero will disable
776 * coalesce
777 */
778 if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
779 !rx_intr_coal.pending_limt))
780 netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
781 if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
782 !tx_intr_coal.pending_limt))
783 netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
784
785 if (set_rx_coal) {
786 err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
787 if (err)
788 return err;
789 }
790 if (set_tx_coal) {
791 err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
792 if (err)
793 return err;
794 }
795 return 0;
796 }
797
798 static int hinic_get_coalesce(struct net_device *netdev,
799 struct ethtool_coalesce *coal,
800 struct kernel_ethtool_coalesce *kernel_coal,
801 struct netlink_ext_ack *extack)
802 {
803 return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
804 }
805
806 static int hinic_set_coalesce(struct net_device *netdev,
807 struct ethtool_coalesce *coal,
808 struct kernel_ethtool_coalesce *kernel_coal,
809 struct netlink_ext_ack *extack)
810 {
811 return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
812 }
813
814 static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
815 struct ethtool_coalesce *coal)
816 {
817 return __hinic_get_coalesce(netdev, coal, queue);
818 }
819
820 static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
821 struct ethtool_coalesce *coal)
822 {
823 return __hinic_set_coalesce(netdev, coal, queue);
824 }
825
826 static void hinic_get_pauseparam(struct net_device *netdev,
827 struct ethtool_pauseparam *pause)
828 {
829 struct hinic_dev *nic_dev = netdev_priv(netdev);
830 struct hinic_pause_config pause_info = {0};
831 struct hinic_nic_cfg *nic_cfg;
832 int err;
833
834 nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
835
836 err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
837 if (!err) {
838 pause->autoneg = pause_info.auto_neg;
839 if (nic_cfg->pause_set || !pause_info.auto_neg) {
840 pause->rx_pause = nic_cfg->rx_pause;
841 pause->tx_pause = nic_cfg->tx_pause;
842 } else {
843 pause->rx_pause = pause_info.rx_pause;
844 pause->tx_pause = pause_info.tx_pause;
845 }
846 }
847 }
848
849 static int hinic_set_pauseparam(struct net_device *netdev,
850 struct ethtool_pauseparam *pause)
851 {
852 struct hinic_dev *nic_dev = netdev_priv(netdev);
853 struct hinic_pause_config pause_info = {0};
854 struct hinic_port_cap port_cap = {0};
855 int err;
856
857 err = hinic_port_get_cap(nic_dev, &port_cap);
858 if (err)
859 return -EIO;
860
861 if (pause->autoneg != port_cap.autoneg_state)
862 return -EOPNOTSUPP;
863
864 pause_info.auto_neg = pause->autoneg;
865 pause_info.rx_pause = pause->rx_pause;
866 pause_info.tx_pause = pause->tx_pause;
867
868 mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
869 err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
870 if (err) {
871 mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
872 return err;
873 }
874 nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
875 nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
876 nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
877 nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
878 mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
879
880 return 0;
881 }
882
883 static void hinic_get_channels(struct net_device *netdev,
884 struct ethtool_channels *channels)
885 {
886 struct hinic_dev *nic_dev = netdev_priv(netdev);
887 struct hinic_hwdev *hwdev = nic_dev->hwdev;
888
889 channels->max_combined = nic_dev->max_qps;
890 channels->combined_count = hinic_hwdev_num_qps(hwdev);
891 }
892
893 static int hinic_set_channels(struct net_device *netdev,
894 struct ethtool_channels *channels)
895 {
896 struct hinic_dev *nic_dev = netdev_priv(netdev);
897 unsigned int count = channels->combined_count;
898 int err;
899
900 netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
901 hinic_hwdev_num_qps(nic_dev->hwdev), count);
902
903 if (netif_running(netdev)) {
904 netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
905 hinic_close(netdev);
906
907 nic_dev->hwdev->nic_cap.num_qps = count;
908
909 err = hinic_open(netdev);
910 if (err) {
911 netif_err(nic_dev, drv, netdev,
912 "Failed to open netdev\n");
913 return -EFAULT;
914 }
915 } else {
916 nic_dev->hwdev->nic_cap.num_qps = count;
917 }
918
919 return 0;
920 }
921
922 static int hinic_get_rxfh_fields(struct net_device *netdev,
923 struct ethtool_rxfh_fields *cmd)
924 {
925 struct hinic_dev *nic_dev = netdev_priv(netdev);
926 struct hinic_rss_type rss_type = { 0 };
927 int err;
928
929 cmd->data = 0;
930
931 if (!(nic_dev->flags & HINIC_RSS_ENABLE))
932 return 0;
933
934 err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
935 &rss_type);
936 if (err)
937 return err;
938
939 cmd->data = RXH_IP_SRC | RXH_IP_DST;
940 switch (cmd->flow_type) {
941 case TCP_V4_FLOW:
942 if (rss_type.tcp_ipv4)
943 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
944 break;
945 case TCP_V6_FLOW:
946 if (rss_type.tcp_ipv6)
947 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
948 break;
949 case UDP_V4_FLOW:
950 if (rss_type.udp_ipv4)
951 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
952 break;
953 case UDP_V6_FLOW:
954 if (rss_type.udp_ipv6)
955 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
956 break;
957 case IPV4_FLOW:
958 case IPV6_FLOW:
959 break;
960 default:
961 cmd->data = 0;
962 return -EINVAL;
963 }
964
965 return 0;
966 }
967
968 static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
969 struct hinic_rss_type *rss_type)
970 {
971 u8 rss_l4_en = 0;
972
973 switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
974 case 0:
975 rss_l4_en = 0;
976 break;
977 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
978 rss_l4_en = 1;
979 break;
980 default:
981 return -EINVAL;
982 }
983
984 switch (cmd->flow_type) {
985 case TCP_V4_FLOW:
986 rss_type->tcp_ipv4 = rss_l4_en;
987 break;
988 case TCP_V6_FLOW:
989 rss_type->tcp_ipv6 = rss_l4_en;
990 break;
991 case UDP_V4_FLOW:
992 rss_type->udp_ipv4 = rss_l4_en;
993 break;
994 case UDP_V6_FLOW:
995 rss_type->udp_ipv6 = rss_l4_en;
996 break;
997 default:
998 return -EINVAL;
999 }
1000
1001 return 0;
1002 }
1003
1004 static int hinic_set_rxfh_fields(struct net_device *dev,
1005 const struct ethtool_rxfh_fields *cmd,
1006 struct netlink_ext_ack *extack)
1007 {
1008 struct hinic_dev *nic_dev = netdev_priv(dev);
1009 struct hinic_rss_type *rss_type;
1010 int err;
1011
1012 rss_type = &nic_dev->rss_type;
1013
1014 if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1015 return -EOPNOTSUPP;
1016
1017 /* RSS does not support anything other than hashing
1018 * to queues on src and dst IPs and ports
1019 */
1020 if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
1021 RXH_L4_B_2_3))
1022 return -EINVAL;
1023
1024 /* We need at least the IP SRC and DEST fields for hashing */
1025 if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
1026 return -EINVAL;
1027
1028 err = hinic_get_rss_type(nic_dev,
1029 nic_dev->rss_tmpl_idx, rss_type);
1030 if (err)
1031 return -EFAULT;
1032
1033 switch (cmd->flow_type) {
1034 case TCP_V4_FLOW:
1035 case TCP_V6_FLOW:
1036 case UDP_V4_FLOW:
1037 case UDP_V6_FLOW:
1038 err = set_l4_rss_hash_ops(cmd, rss_type);
1039 if (err)
1040 return err;
1041 break;
1042 case IPV4_FLOW:
1043 rss_type->ipv4 = 1;
1044 break;
1045 case IPV6_FLOW:
1046 rss_type->ipv6 = 1;
1047 break;
1048 default:
1049 return -EINVAL;
1050 }
1051
1052 err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
1053 *rss_type);
1054 if (err)
1055 return -EFAULT;
1056
1057 return 0;
1058 }
1059
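/* Program a user-supplied RSS indirection table and/or hash key into the
 * RSS template, caching a copy in rss_indir_user/rss_hkey_user.
 */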
1060 static int __set_rss_rxfh(struct net_device *netdev,
1061 const u32 *indir, const u8 *key)
1062 {
1063 struct hinic_dev *nic_dev = netdev_priv(netdev);
1064 int err;
1065
1066 if (indir) {
1067 if (!nic_dev->rss_indir_user) {
1068 nic_dev->rss_indir_user =
1069 kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
1070 GFP_KERNEL);
1071 if (!nic_dev->rss_indir_user)
1072 return -ENOMEM;
1073 }
1074
1075 memcpy(nic_dev->rss_indir_user, indir,
1076 sizeof(u32) * HINIC_RSS_INDIR_SIZE);
1077
1078 err = hinic_rss_set_indir_tbl(nic_dev,
1079 nic_dev->rss_tmpl_idx, indir);
1080 if (err)
1081 return -EFAULT;
1082 }
1083
1084 if (key) {
1085 if (!nic_dev->rss_hkey_user) {
1086 nic_dev->rss_hkey_user =
1087 kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
1088
1089 if (!nic_dev->rss_hkey_user)
1090 return -ENOMEM;
1091 }
1092
1093 memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
1094
1095 err = hinic_rss_set_template_tbl(nic_dev,
1096 nic_dev->rss_tmpl_idx, key);
1097 if (err)
1098 return -EFAULT;
1099 }
1100
1101 return 0;
1102 }
1103
1104 static int hinic_get_rxnfc(struct net_device *netdev,
1105 struct ethtool_rxnfc *cmd, u32 *rule_locs)
1106 {
1107 struct hinic_dev *nic_dev = netdev_priv(netdev);
1108 int err = 0;
1109
1110 switch (cmd->cmd) {
1111 case ETHTOOL_GRXRINGS:
1112 cmd->data = nic_dev->num_qps;
1113 break;
1114 default:
1115 err = -EOPNOTSUPP;
1116 break;
1117 }
1118
1119 return err;
1120 }
1121
1122 static int hinic_get_rxfh(struct net_device *netdev,
1123 struct ethtool_rxfh_param *rxfh)
1124 {
1125 struct hinic_dev *nic_dev = netdev_priv(netdev);
1126 u8 hash_engine_type = 0;
1127 int err = 0;
1128
1129 if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1130 return -EOPNOTSUPP;
1131
1132 err = hinic_rss_get_hash_engine(nic_dev,
1133 nic_dev->rss_tmpl_idx,
1134 &hash_engine_type);
1135 if (err)
1136 return -EFAULT;
1137
1138 rxfh->hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
1139
1140 if (rxfh->indir) {
1141 err = hinic_rss_get_indir_tbl(nic_dev,
1142 nic_dev->rss_tmpl_idx,
1143 rxfh->indir);
1144 if (err)
1145 return -EFAULT;
1146 }
1147
1148 if (rxfh->key)
1149 err = hinic_rss_get_template_tbl(nic_dev,
1150 nic_dev->rss_tmpl_idx,
1151 rxfh->key);
1152
1153 return err;
1154 }
1155
1156 static int hinic_set_rxfh(struct net_device *netdev,
1157 struct ethtool_rxfh_param *rxfh,
1158 struct netlink_ext_ack *extack)
1159 {
1160 struct hinic_dev *nic_dev = netdev_priv(netdev);
1161 int err = 0;
1162
1163 if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1164 return -EOPNOTSUPP;
1165
1166 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
1167 if (rxfh->hfunc != ETH_RSS_HASH_TOP &&
1168 rxfh->hfunc != ETH_RSS_HASH_XOR)
1169 return -EOPNOTSUPP;
1170
1171 nic_dev->rss_hash_engine = (rxfh->hfunc == ETH_RSS_HASH_XOR) ?
1172 HINIC_RSS_HASH_ENGINE_TYPE_XOR :
1173 HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
1174 err = hinic_rss_set_hash_engine
1175 (nic_dev, nic_dev->rss_tmpl_idx,
1176 nic_dev->rss_hash_engine);
1177 if (err)
1178 return -EFAULT;
1179 }
1180
1181 err = __set_rss_rxfh(netdev, rxfh->indir, rxfh->key);
1182
1183 return err;
1184 }
1185
1186 static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
1187 {
1188 return HINIC_RSS_KEY_SIZE;
1189 }
1190
1191 static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
1192 {
1193 return HINIC_RSS_INDIR_SIZE;
1194 }
1195
1196 #define HINIC_FUNC_STAT(_stat_item) { \
1197 .name = #_stat_item, \
1198 .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
1199 .offset = offsetof(struct hinic_vport_stats, _stat_item) \
1200 }
1201
1202 static struct hinic_stats hinic_function_stats[] = {
1203 HINIC_FUNC_STAT(tx_unicast_pkts_vport),
1204 HINIC_FUNC_STAT(tx_unicast_bytes_vport),
1205 HINIC_FUNC_STAT(tx_multicast_pkts_vport),
1206 HINIC_FUNC_STAT(tx_multicast_bytes_vport),
1207 HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
1208 HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
1209
1210 HINIC_FUNC_STAT(rx_unicast_pkts_vport),
1211 HINIC_FUNC_STAT(rx_unicast_bytes_vport),
1212 HINIC_FUNC_STAT(rx_multicast_pkts_vport),
1213 HINIC_FUNC_STAT(rx_multicast_bytes_vport),
1214 HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
1215 HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
1216
1217 HINIC_FUNC_STAT(tx_discard_vport),
1218 HINIC_FUNC_STAT(rx_discard_vport),
1219 HINIC_FUNC_STAT(tx_err_vport),
1220 HINIC_FUNC_STAT(rx_err_vport),
1221 };
1222
1223 static char hinic_test_strings[][ETH_GSTRING_LEN] = {
1224 "Internal lb test (on/offline)",
1225 "External lb test (external_lb)",
1226 };
1227
1228 #define HINIC_PORT_STAT(_stat_item) { \
1229 .name = #_stat_item, \
1230 .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
1231 .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
1232 }
1233
1234 static struct hinic_stats hinic_port_stats[] = {
1235 HINIC_PORT_STAT(mac_rx_total_pkt_num),
1236 HINIC_PORT_STAT(mac_rx_total_oct_num),
1237 HINIC_PORT_STAT(mac_rx_bad_pkt_num),
1238 HINIC_PORT_STAT(mac_rx_bad_oct_num),
1239 HINIC_PORT_STAT(mac_rx_good_pkt_num),
1240 HINIC_PORT_STAT(mac_rx_good_oct_num),
1241 HINIC_PORT_STAT(mac_rx_uni_pkt_num),
1242 HINIC_PORT_STAT(mac_rx_multi_pkt_num),
1243 HINIC_PORT_STAT(mac_rx_broad_pkt_num),
1244 HINIC_PORT_STAT(mac_tx_total_pkt_num),
1245 HINIC_PORT_STAT(mac_tx_total_oct_num),
1246 HINIC_PORT_STAT(mac_tx_bad_pkt_num),
1247 HINIC_PORT_STAT(mac_tx_bad_oct_num),
1248 HINIC_PORT_STAT(mac_tx_good_pkt_num),
1249 HINIC_PORT_STAT(mac_tx_good_oct_num),
1250 HINIC_PORT_STAT(mac_tx_uni_pkt_num),
1251 HINIC_PORT_STAT(mac_tx_multi_pkt_num),
1252 HINIC_PORT_STAT(mac_tx_broad_pkt_num),
1253 HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
1254 HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
1255 HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
1256 HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
1257 HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
1258 HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
1259 HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
1260 HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
1261 HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
1262 HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
1263 HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
1264 HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
1265 HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
1266 HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
1267 HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
1268 HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
1269 HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
1270 HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
1271 HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
1272 HINIC_PORT_STAT(mac_rx_pause_num),
1273 HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
1274 HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
1275 HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
1276 HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
1277 HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
1278 HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
1279 HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
1280 HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
1281 HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
1282 HINIC_PORT_STAT(mac_rx_control_pkt_num),
1283 HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
1284 HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
1285 HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
1286 HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
1287 HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
1288 HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
1289 HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
1290 HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
1291 HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
1292 HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
1293 HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
1294 HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
1295 HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
1296 HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
1297 HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
1298 HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
1299 HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
1300 HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
1301 HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
1302 HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
1303 HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
1304 HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
1305 HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
1306 HINIC_PORT_STAT(mac_tx_pause_num),
1307 HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
1308 HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
1309 HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
1310 HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
1311 HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
1312 HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
1313 HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
1314 HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
1315 HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
1316 HINIC_PORT_STAT(mac_tx_control_pkt_num),
1317 HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
1318 HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
1319 HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
1320 };
1321
1322 #define HINIC_TXQ_STAT(_stat_item) { \
1323 .name = "txq%d_"#_stat_item, \
1324 .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
1325 .offset = offsetof(struct hinic_txq_stats, _stat_item) \
1326 }
1327
1328 static struct hinic_stats hinic_tx_queue_stats[] = {
1329 HINIC_TXQ_STAT(pkts),
1330 HINIC_TXQ_STAT(bytes),
1331 HINIC_TXQ_STAT(tx_busy),
1332 HINIC_TXQ_STAT(tx_wake),
1333 HINIC_TXQ_STAT(tx_dropped),
1334 HINIC_TXQ_STAT(big_frags_pkts),
1335 };
1336
1337 #define HINIC_RXQ_STAT(_stat_item) { \
1338 .name = "rxq%d_"#_stat_item, \
1339 .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
1340 .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
1341 }
1342
1343 static struct hinic_stats hinic_rx_queue_stats[] = {
1344 HINIC_RXQ_STAT(pkts),
1345 HINIC_RXQ_STAT(bytes),
1346 HINIC_RXQ_STAT(errors),
1347 HINIC_RXQ_STAT(csum_errors),
1348 HINIC_RXQ_STAT(other_errors),
1349 };
1350
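/* Copy the per-queue tx counters followed by the per-queue rx counters
 * into @data, in the same order as the strings emitted by
 * hinic_get_strings().
 */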
1351 static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
1352 {
1353 struct hinic_txq_stats txq_stats;
1354 struct hinic_rxq_stats rxq_stats;
1355 u16 i = 0, j = 0, qid = 0;
1356 char *p;
1357
1358 for (qid = 0; qid < nic_dev->num_qps; qid++) {
1359 if (!nic_dev->txqs)
1360 break;
1361
1362 hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
1363 for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
1364 p = (char *)&txq_stats +
1365 hinic_tx_queue_stats[j].offset;
1366 data[i] = (hinic_tx_queue_stats[j].size ==
1367 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1368 }
1369 }
1370
1371 for (qid = 0; qid < nic_dev->num_qps; qid++) {
1372 if (!nic_dev->rxqs)
1373 break;
1374
1375 hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
1376 for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
1377 p = (char *)&rxq_stats +
1378 hinic_rx_queue_stats[j].offset;
1379 data[i] = (hinic_rx_queue_stats[j].size ==
1380 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1381 }
1382 }
1383 }
1384
1385 static void hinic_get_ethtool_stats(struct net_device *netdev,
1386 struct ethtool_stats *stats, u64 *data)
1387 {
1388 struct hinic_dev *nic_dev = netdev_priv(netdev);
1389 struct hinic_vport_stats vport_stats = {0};
1390 struct hinic_phy_port_stats *port_stats;
1391 u16 i = 0, j = 0;
1392 char *p;
1393 int err;
1394
1395 err = hinic_get_vport_stats(nic_dev, &vport_stats);
1396 if (err)
1397 netif_err(nic_dev, drv, netdev,
1398 "Failed to get vport stats from firmware\n");
1399
1400 for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
1401 p = (char *)&vport_stats + hinic_function_stats[j].offset;
1402 data[i] = (hinic_function_stats[j].size ==
1403 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1404 }
1405
1406 port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
1407 if (!port_stats) {
1408 memset(&data[i], 0,
1409 ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
1410 i += ARRAY_SIZE(hinic_port_stats);
1411 goto get_drv_stats;
1412 }
1413
1414 err = hinic_get_phy_port_stats(nic_dev, port_stats);
1415 if (err)
1416 netif_err(nic_dev, drv, netdev,
1417 "Failed to get port stats from firmware\n");
1418
1419 for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
1420 p = (char *)port_stats + hinic_port_stats[j].offset;
1421 data[i] = (hinic_port_stats[j].size ==
1422 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1423 }
1424
1425 kfree(port_stats);
1426
1427 get_drv_stats:
1428 get_drv_queue_stats(nic_dev, data + i);
1429 }
1430
1431 static int hinic_get_sset_count(struct net_device *netdev, int sset)
1432 {
1433 struct hinic_dev *nic_dev = netdev_priv(netdev);
1434 int count, q_num;
1435
1436 switch (sset) {
1437 case ETH_SS_TEST:
1438 return ARRAY_SIZE(hinic_test_strings);
1439 case ETH_SS_STATS:
1440 q_num = nic_dev->num_qps;
1441 count = ARRAY_SIZE(hinic_function_stats) +
1442 (ARRAY_SIZE(hinic_tx_queue_stats) +
1443 ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
1444
1445 count += ARRAY_SIZE(hinic_port_stats);
1446
1447 return count;
1448 default:
1449 return -EOPNOTSUPP;
1450 }
1451 }
1452
1453 static void hinic_get_strings(struct net_device *netdev,
1454 u32 stringset, u8 *data)
1455 {
1456 struct hinic_dev *nic_dev = netdev_priv(netdev);
1457 u16 i, j;
1458
1459 switch (stringset) {
1460 case ETH_SS_TEST:
1461 memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
1462 return;
1463 case ETH_SS_STATS:
1464 for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++)
1465 ethtool_puts(&data, hinic_function_stats[i].name);
1466
1467 for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++)
1468 ethtool_puts(&data, hinic_port_stats[i].name);
1469
1470 for (i = 0; i < nic_dev->num_qps; i++)
1471 for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++)
1472 ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i);
1473
1474 for (i = 0; i < nic_dev->num_qps; i++)
1475 for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++)
1476 ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i);
1477
1478 return;
1479 default:
1480 return;
1481 }
1482 }
1483
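/* Build an LP_PKT_LEN loopback frame, transmit LP_PKT_CNT copies per burst
 * for roughly test_time seconds (five bursts per second, 200 ms apart) and
 * compare every looped-back packet in lb_test_rx_buf against the original
 * payload.
 */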
1484 static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
1485 {
1486 u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
1487 struct net_device *netdev = nic_dev->netdev;
1488 struct sk_buff *skb_tmp = NULL;
1489 struct sk_buff *skb = NULL;
1490 u32 cnt = test_time * 5;
1491 u8 *test_data = NULL;
1492 u32 i;
1493 u8 j;
1494
1495 skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
1496 if (!skb_tmp)
1497 return -ENOMEM;
1498
1499 test_data = __skb_put(skb_tmp, LP_PKT_LEN);
1500
1501 memset(test_data, 0xFF, 2 * ETH_ALEN);
1502 test_data[ETH_ALEN] = 0xFE;
1503 test_data[2 * ETH_ALEN] = 0x08;
1504 test_data[2 * ETH_ALEN + 1] = 0x0;
1505
1506 for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
1507 test_data[i] = i & 0xFF;
1508
1509 skb_tmp->queue_mapping = 0;
1510 skb_tmp->ip_summed = CHECKSUM_COMPLETE;
1511 skb_tmp->dev = netdev;
1512
1513 for (i = 0; i < cnt; i++) {
1514 nic_dev->lb_test_rx_idx = 0;
1515 memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
1516
1517 for (j = 0; j < LP_PKT_CNT; j++) {
1518 skb = pskb_copy(skb_tmp, GFP_ATOMIC);
1519 if (!skb) {
1520 dev_kfree_skb_any(skb_tmp);
1521 netif_err(nic_dev, drv, netdev,
1522 "Copy skb failed for loopback test\n");
1523 return -ENOMEM;
1524 }
1525
1526 /* mark index for every pkt */
1527 skb->data[LP_PKT_LEN - 1] = j;
1528
1529 if (hinic_lb_xmit_frame(skb, netdev)) {
1530 dev_kfree_skb_any(skb);
1531 dev_kfree_skb_any(skb_tmp);
1532 netif_err(nic_dev, drv, netdev,
1533 "Xmit pkt failed for loopback test\n");
1534 return -EBUSY;
1535 }
1536 }
1537
1538 /* wait until all packets have been received into the RX buffer */
1539 msleep(200);
1540
1541 for (j = 0; j < LP_PKT_CNT; j++) {
1542 if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
1543 skb_tmp->data, LP_PKT_LEN - 1) ||
1544 (*(lb_test_rx_buf + j * LP_PKT_LEN +
1545 LP_PKT_LEN - 1) != j)) {
1546 dev_kfree_skb_any(skb_tmp);
1547 netif_err(nic_dev, drv, netdev,
1548 "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
1549 j + i * LP_PKT_CNT,
1550 LP_PKT_LEN - 1,
1551 *(lb_test_rx_buf + j * LP_PKT_LEN +
1552 LP_PKT_LEN - 1));
1553 return -EIO;
1554 }
1555 }
1556 }
1557
1558 dev_kfree_skb_any(skb_tmp);
1559 return 0;
1560 }
1561
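/* Run the loopback self-test: enable internal port loopback unless the
 * external-loopback flag is set, allocate the receive buffer, run the
 * packet test and finally restore the loopback mode. *test_index records
 * whether the internal or the external test was run.
 */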
1562 static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
1563 enum diag_test_index *test_index)
1564 {
1565 struct net_device *netdev = nic_dev->netdev;
1566 u8 *lb_test_rx_buf = NULL;
1567 int err = 0;
1568
1569 if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1570 *test_index = INTERNAL_LP_TEST;
1571 if (hinic_set_loopback_mode(nic_dev->hwdev,
1572 HINIC_INTERNAL_LP_MODE, true)) {
1573 netif_err(nic_dev, drv, netdev,
1574 "Failed to set port loopback mode before loopback test\n");
1575 return -EIO;
1576 }
1577 } else {
1578 *test_index = EXTERNAL_LP_TEST;
1579 }
1580
1581 lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
1582 if (!lb_test_rx_buf) {
1583 err = -ENOMEM;
1584 } else {
1585 nic_dev->lb_test_rx_buf = lb_test_rx_buf;
1586 nic_dev->lb_pkt_len = LP_PKT_LEN;
1587 nic_dev->flags |= HINIC_LP_TEST;
1588 err = hinic_run_lp_test(nic_dev, test_time);
1589 nic_dev->flags &= ~HINIC_LP_TEST;
1590 msleep(100);
1591 vfree(lb_test_rx_buf);
1592 nic_dev->lb_test_rx_buf = NULL;
1593 }
1594
1595 if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1596 if (hinic_set_loopback_mode(nic_dev->hwdev,
1597 HINIC_INTERNAL_LP_MODE, false)) {
1598 netif_err(nic_dev, drv, netdev,
1599 "Failed to cancel port loopback mode after loopback test\n");
1600 err = -EIO;
1601 }
1602 }
1603
1604 return err;
1605 }
1606
1607 static void hinic_diag_test(struct net_device *netdev,
1608 struct ethtool_test *eth_test, u64 *data)
1609 {
1610 struct hinic_dev *nic_dev = netdev_priv(netdev);
1611 enum hinic_port_link_state link_state;
1612 enum diag_test_index test_index = 0;
1613 int err = 0;
1614
1615 memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
1616
1617 /* the loopback test is not supported while the netdev is closed */
1618 if (!(nic_dev->flags & HINIC_INTF_UP)) {
1619 netif_err(nic_dev, drv, netdev,
1620 "Do not support loopback test when netdev is closed\n");
1621 eth_test->flags |= ETH_TEST_FL_FAILED;
1622 data[PORT_DOWN_ERR_IDX] = 1;
1623 return;
1624 }
1625
1626 netif_carrier_off(netdev);
1627 netif_tx_disable(netdev);
1628
1629 err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
1630 &test_index);
1631 if (err) {
1632 eth_test->flags |= ETH_TEST_FL_FAILED;
1633 data[test_index] = 1;
1634 }
1635
1636 netif_tx_wake_all_queues(netdev);
1637
1638 err = hinic_port_link_state(nic_dev, &link_state);
1639 if (!err && link_state == HINIC_LINK_STATE_UP)
1640 netif_carrier_on(netdev);
1641 }
1642
1643 static int hinic_set_phys_id(struct net_device *netdev,
1644 enum ethtool_phys_id_state state)
1645 {
1646 struct hinic_dev *nic_dev = netdev_priv(netdev);
1647 int err = 0;
1648 u8 port;
1649
1650 port = nic_dev->hwdev->port_id;
1651
1652 switch (state) {
1653 case ETHTOOL_ID_ACTIVE:
1654 err = hinic_set_led_status(nic_dev->hwdev, port,
1655 HINIC_LED_TYPE_LINK,
1656 HINIC_LED_MODE_FORCE_2HZ);
1657 if (err)
1658 netif_err(nic_dev, drv, netdev,
1659 "Set LED blinking in 2HZ failed\n");
1660 break;
1661
1662 case ETHTOOL_ID_INACTIVE:
1663 err = hinic_reset_led_status(nic_dev->hwdev, port);
1664 if (err)
1665 netif_err(nic_dev, drv, netdev,
1666 "Reset LED to original status failed\n");
1667 break;
1668
1669 default:
1670 return -EOPNOTSUPP;
1671 }
1672
1673 return err;
1674 }
1675
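/* Translate the module's SFF-8024 identifier (plus a secondary type byte
 * for QSFP modules) into the ethtool EEPROM layout type and length.
 */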
1676 static int hinic_get_module_info(struct net_device *netdev,
1677 struct ethtool_modinfo *modinfo)
1678 {
1679 struct hinic_dev *nic_dev = netdev_priv(netdev);
1680 u8 sfp_type_ext;
1681 u8 sfp_type;
1682 int err;
1683
1684 err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
1685 if (err)
1686 return err;
1687
1688 switch (sfp_type) {
1689 case SFF8024_ID_SFP:
1690 modinfo->type = ETH_MODULE_SFF_8472;
1691 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1692 break;
1693 case SFF8024_ID_QSFP_8438:
1694 modinfo->type = ETH_MODULE_SFF_8436;
1695 modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1696 break;
1697 case SFF8024_ID_QSFP_8436_8636:
1698 if (sfp_type_ext >= 0x3) {
1699 modinfo->type = ETH_MODULE_SFF_8636;
1700 modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1701
1702 } else {
1703 modinfo->type = ETH_MODULE_SFF_8436;
1704 modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1705 }
1706 break;
1707 case SFF8024_ID_QSFP28_8636:
1708 modinfo->type = ETH_MODULE_SFF_8636;
1709 modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1710 break;
1711 default:
1712 netif_warn(nic_dev, drv, netdev,
1713 "Optical module unknown: 0x%x\n", sfp_type);
1714 return -EINVAL;
1715 }
1716
1717 return 0;
1718 }
1719
1720 static int hinic_get_module_eeprom(struct net_device *netdev,
1721 struct ethtool_eeprom *ee, u8 *data)
1722 {
1723 struct hinic_dev *nic_dev = netdev_priv(netdev);
1724 u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
1725 u16 len;
1726 int err;
1727
1728 if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
1729 return -EINVAL;
1730
1731 memset(data, 0, ee->len);
1732
1733 err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
1734 if (err)
1735 return err;
1736
1737 memcpy(data, sfp_data + ee->offset, ee->len);
1738
1739 return 0;
1740 }
1741
1742 static int
1743 hinic_get_link_ext_state(struct net_device *netdev,
1744 struct ethtool_link_ext_state_info *link_ext_state_info)
1745 {
1746 struct hinic_dev *nic_dev = netdev_priv(netdev);
1747
1748 if (netif_carrier_ok(netdev))
1749 return -ENODATA;
1750
1751 if (nic_dev->cable_unplugged)
1752 link_ext_state_info->link_ext_state =
1753 ETHTOOL_LINK_EXT_STATE_NO_CABLE;
1754 else if (nic_dev->module_unrecognized)
1755 link_ext_state_info->link_ext_state =
1756 ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
1757
1758 return 0;
1759 }
1760
1761 static const struct ethtool_ops hinic_ethtool_ops = {
1762 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1763 ETHTOOL_COALESCE_RX_MAX_FRAMES |
1764 ETHTOOL_COALESCE_TX_USECS |
1765 ETHTOOL_COALESCE_TX_MAX_FRAMES,
1766
1767 .get_link_ksettings = hinic_get_link_ksettings,
1768 .set_link_ksettings = hinic_set_link_ksettings,
1769 .get_drvinfo = hinic_get_drvinfo,
1770 .get_link = ethtool_op_get_link,
1771 .get_link_ext_state = hinic_get_link_ext_state,
1772 .get_ringparam = hinic_get_ringparam,
1773 .set_ringparam = hinic_set_ringparam,
1774 .get_coalesce = hinic_get_coalesce,
1775 .set_coalesce = hinic_set_coalesce,
1776 .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1777 .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1778 .get_pauseparam = hinic_get_pauseparam,
1779 .set_pauseparam = hinic_set_pauseparam,
1780 .get_channels = hinic_get_channels,
1781 .set_channels = hinic_set_channels,
1782 .get_rxnfc = hinic_get_rxnfc,
1783 .get_rxfh_key_size = hinic_get_rxfh_key_size,
1784 .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1785 .get_rxfh = hinic_get_rxfh,
1786 .set_rxfh = hinic_set_rxfh,
1787 .get_rxfh_fields = hinic_get_rxfh_fields,
1788 .set_rxfh_fields = hinic_set_rxfh_fields,
1789 .get_sset_count = hinic_get_sset_count,
1790 .get_ethtool_stats = hinic_get_ethtool_stats,
1791 .get_strings = hinic_get_strings,
1792 .self_test = hinic_diag_test,
1793 .set_phys_id = hinic_set_phys_id,
1794 .get_module_info = hinic_get_module_info,
1795 .get_module_eeprom = hinic_get_module_eeprom,
1796 };
1797
1798 static const struct ethtool_ops hinicvf_ethtool_ops = {
1799 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1800 ETHTOOL_COALESCE_RX_MAX_FRAMES |
1801 ETHTOOL_COALESCE_TX_USECS |
1802 ETHTOOL_COALESCE_TX_MAX_FRAMES,
1803
1804 .get_link_ksettings = hinic_get_link_ksettings,
1805 .get_drvinfo = hinic_get_drvinfo,
1806 .get_link = ethtool_op_get_link,
1807 .get_ringparam = hinic_get_ringparam,
1808 .set_ringparam = hinic_set_ringparam,
1809 .get_coalesce = hinic_get_coalesce,
1810 .set_coalesce = hinic_set_coalesce,
1811 .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1812 .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1813 .get_channels = hinic_get_channels,
1814 .set_channels = hinic_set_channels,
1815 .get_rxnfc = hinic_get_rxnfc,
1816 .get_rxfh_key_size = hinic_get_rxfh_key_size,
1817 .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1818 .get_rxfh = hinic_get_rxfh,
1819 .set_rxfh = hinic_set_rxfh,
1820 .get_rxfh_fields = hinic_get_rxfh_fields,
1821 .set_rxfh_fields = hinic_set_rxfh_fields,
1822 .get_sset_count = hinic_get_sset_count,
1823 .get_ethtool_stats = hinic_get_ethtool_stats,
1824 .get_strings = hinic_get_strings,
1825 };
1826
1827 void hinic_set_ethtool_ops(struct net_device *netdev)
1828 {
1829 struct hinic_dev *nic_dev = netdev_priv(netdev);
1830
1831 if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
1832 netdev->ethtool_ops = &hinic_ethtool_ops;
1833 else
1834 netdev->ethtool_ops = &hinicvf_ethtool_ops;
1835 }
1836