1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2
3 #include <linux/ethtool.h>
4 #include <linux/linkmode.h>
5 #include <linux/netdevice.h>
6 #include <linux/nvme.h>
7 #include <linux/io.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <linux/pci.h>
10 #include <linux/rtnetlink.h>
11 #include "funeth.h"
12 #include "fun_port.h"
13 #include "funeth_txrx.h"
14
15 /* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
16 * pages is 8. Require it for all types of queues though some could work with
17 * fewer entries.
18 */
19 #define FUNETH_MIN_QDEPTH 8
20
/* ethtool -S string names for the per-port MAC Tx hardware counters.
 * Order must match the order get_mac_stats() emits the TX_STAT values.
 */
static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_tx_octets_total",
	"mac_tx_frames_total",
	"mac_tx_vlan_frames_ok",
	"mac_tx_unicast_frames",
	"mac_tx_multicast_frames",
	"mac_tx_broadcast_frames",
	"mac_tx_errors",
	"mac_tx_CBFCPAUSE0",
	"mac_tx_CBFCPAUSE1",
	"mac_tx_CBFCPAUSE2",
	"mac_tx_CBFCPAUSE3",
	"mac_tx_CBFCPAUSE4",
	"mac_tx_CBFCPAUSE5",
	"mac_tx_CBFCPAUSE6",
	"mac_tx_CBFCPAUSE7",
	"mac_tx_CBFCPAUSE8",
	"mac_tx_CBFCPAUSE9",
	"mac_tx_CBFCPAUSE10",
	"mac_tx_CBFCPAUSE11",
	"mac_tx_CBFCPAUSE12",
	"mac_tx_CBFCPAUSE13",
	"mac_tx_CBFCPAUSE14",
	"mac_tx_CBFCPAUSE15",
};
46
/* ethtool -S string names for the per-port MAC Rx hardware counters.
 * Order must match the order get_mac_stats() emits the RX_STAT values.
 */
static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_rx_octets_total",
	"mac_rx_frames_total",
	"mac_rx_VLAN_frames_ok",
	"mac_rx_unicast_frames",
	"mac_rx_multicast_frames",
	"mac_rx_broadcast_frames",
	"mac_rx_drop_events",
	"mac_rx_errors",
	"mac_rx_alignment_errors",
	"mac_rx_CBFCPAUSE0",
	"mac_rx_CBFCPAUSE1",
	"mac_rx_CBFCPAUSE2",
	"mac_rx_CBFCPAUSE3",
	"mac_rx_CBFCPAUSE4",
	"mac_rx_CBFCPAUSE5",
	"mac_rx_CBFCPAUSE6",
	"mac_rx_CBFCPAUSE7",
	"mac_rx_CBFCPAUSE8",
	"mac_rx_CBFCPAUSE9",
	"mac_rx_CBFCPAUSE10",
	"mac_rx_CBFCPAUSE11",
	"mac_rx_CBFCPAUSE12",
	"mac_rx_CBFCPAUSE13",
	"mac_rx_CBFCPAUSE14",
	"mac_rx_CBFCPAUSE15",
};
74
/* Per-Tx-queue software counter names; order must match the ADD_STAT
 * sequence for Tx queues in fun_get_ethtool_stats().
 */
static const char * const txq_stat_names[] = {
	"tx_pkts",
	"tx_bytes",
	"tx_cso",
	"tx_tso",
	"tx_encapsulated_tso",
	"tx_uso",
	"tx_more",
	"tx_queue_stops",
	"tx_queue_restarts",
	"tx_mapping_errors",
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes",
	"tx_tls_ooo",
	"tx_tls_drop_no_sync_data",
};
91
/* Per-XDP-Tx-queue software counter names; order must match the ADD_STAT
 * sequence for XDP queues in fun_get_ethtool_stats().
 */
static const char * const xdpq_stat_names[] = {
	"tx_xdp_pkts",
	"tx_xdp_bytes",
	"tx_xdp_full",
	"tx_xdp_mapping_errors",
};
98
/* Per-Rx-queue software counter names; order must match the ADD_STAT
 * sequence for Rx queues in fun_get_ethtool_stats().
 */
static const char * const rxq_stat_names[] = {
	"rx_pkts",
	"rx_bytes",
	"rx_cso",
	"gro_pkts",
	"gro_merged",
	"rx_xdp_tx",
	"rx_xdp_redir",
	"rx_xdp_drops",
	"rx_buffers",
	"rx_page_allocs",
	"rx_drops",
	"rx_budget_exhausted",
	"rx_mapping_errors",
};
114
/* Device-wide TLS offload counter names; order must match the three
 * atomic64_read()s at the end of fun_get_ethtool_stats().
 */
static const char * const tls_stat_names[] = {
	"tx_tls_ctx",
	"tx_tls_del",
	"tx_tls_resync",
};
120
/* Translate device link-mode capability bits (FUN_PORT_CAP_*) into ethtool
 * link mode bits set in @ethtool_modes_map.  One device capability bit may
 * expand to several ethtool modes since the device encoding does not
 * distinguish media variants (CR/SR/LR/...) of a speed.
 */
static void fun_link_modes_to_ethtool(u64 modes,
				      unsigned long *ethtool_modes_map)
{
#define ADD_LINK_MODE(mode) \
	__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)

	if (modes & FUN_PORT_CAP_AUTONEG)
		ADD_LINK_MODE(Autoneg);
	if (modes & FUN_PORT_CAP_1000_X)
		ADD_LINK_MODE(1000baseX_Full);
	if (modes & FUN_PORT_CAP_10G_R) {
		ADD_LINK_MODE(10000baseCR_Full);
		ADD_LINK_MODE(10000baseSR_Full);
		ADD_LINK_MODE(10000baseLR_Full);
		ADD_LINK_MODE(10000baseER_Full);
	}
	if (modes & FUN_PORT_CAP_25G_R) {
		ADD_LINK_MODE(25000baseCR_Full);
		ADD_LINK_MODE(25000baseSR_Full);
	}
	if (modes & FUN_PORT_CAP_40G_R4) {
		ADD_LINK_MODE(40000baseCR4_Full);
		ADD_LINK_MODE(40000baseSR4_Full);
		ADD_LINK_MODE(40000baseLR4_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R2) {
		ADD_LINK_MODE(50000baseCR2_Full);
		ADD_LINK_MODE(50000baseSR2_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R) {
		ADD_LINK_MODE(50000baseCR_Full);
		ADD_LINK_MODE(50000baseSR_Full);
		ADD_LINK_MODE(50000baseLR_ER_FR_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R4) {
		ADD_LINK_MODE(100000baseCR4_Full);
		ADD_LINK_MODE(100000baseSR4_Full);
		ADD_LINK_MODE(100000baseLR4_ER4_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R2) {
		ADD_LINK_MODE(100000baseCR2_Full);
		ADD_LINK_MODE(100000baseSR2_Full);
		ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
	}
	if (modes & FUN_PORT_CAP_FEC_NONE)
		ADD_LINK_MODE(FEC_NONE);
	if (modes & FUN_PORT_CAP_FEC_FC)
		ADD_LINK_MODE(FEC_BASER);
	if (modes & FUN_PORT_CAP_FEC_RS)
		ADD_LINK_MODE(FEC_RS);
	if (modes & FUN_PORT_CAP_RX_PAUSE)
		ADD_LINK_MODE(Pause);

#undef ADD_LINK_MODE
}
176
/* Advertise Asym_Pause in @ks when exactly one of the Rx/Tx pause
 * capabilities is present in @advertising.
 */
static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
{
	bool rx = !!(advertising & FUN_PORT_CAP_RX_PAUSE);
	bool tx = !!(advertising & FUN_PORT_CAP_TX_PAUSE);

	if (rx != tx)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
}
187
fun_port_type(unsigned int xcvr)188 static unsigned int fun_port_type(unsigned int xcvr)
189 {
190 if (!xcvr)
191 return PORT_NONE;
192
193 switch (xcvr & 7) {
194 case FUN_XCVR_BASET:
195 return PORT_TP;
196 case FUN_XCVR_CU:
197 return PORT_DA;
198 default:
199 return PORT_FIBRE;
200 }
201 }
202
/* ethtool get_link_ksettings handler.  Fills in supported/advertised/peer
 * link modes plus the current speed, duplex, autoneg and port type.
 * Always returns 0.
 */
static int fun_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ks)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int seq, speed, xcvr;
	u64 lp_advertising;
	bool link_up;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);

	/* Link settings change asynchronously, take a consistent snapshot */
	do {
		seq = read_seqcount_begin(&fp->link_seq);
		link_up = netif_carrier_ok(netdev);
		speed = fp->link_speed;
		xcvr = fp->xcvr_type;
		lp_advertising = fp->lp_advertising;
	} while (read_seqcount_retry(&fp->link_seq, seq));

	if (link_up) {
		ks->base.speed = speed;
		ks->base.duplex = DUPLEX_FULL;
		fun_link_modes_to_ethtool(lp_advertising,
					  ks->link_modes.lp_advertising);
	} else {
		/* speed/duplex are meaningless without carrier */
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
			   AUTONEG_ENABLE : AUTONEG_DISABLE;
	ks->base.port = fun_port_type(xcvr);

	fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
	/* Asym_Pause is supported if either pause direction is */
	if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
		ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);

	fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
	set_asym_pause(fp->advertising, ks);
	return 0;
}
246
/* Collapse the ethtool advertised link modes in @ks into the device's
 * FUN_PORT_CAP_* encoding.  Media variants (CR/SR/LR/...) of a speed all
 * map onto the same capability bit.  Pause and FEC modes are intentionally
 * not translated here; callers manage those bits separately.
 */
static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
{
	u64 modes = 0;

#define HAS_MODE(mode) \
	ethtool_link_ksettings_test_link_mode(ks, advertising, mode)

	if (HAS_MODE(1000baseX_Full))
		modes |= FUN_PORT_CAP_1000_X;
	if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
	    HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
		modes |= FUN_PORT_CAP_10G_R;
	if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
		modes |= FUN_PORT_CAP_25G_R;
	if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
	    HAS_MODE(40000baseLR4_Full))
		modes |= FUN_PORT_CAP_40G_R4;
	if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
		modes |= FUN_PORT_CAP_50G_R2;
	if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
	    HAS_MODE(50000baseLR_ER_FR_Full))
		modes |= FUN_PORT_CAP_50G_R;
	if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
	    HAS_MODE(100000baseLR4_ER4_Full))
		modes |= FUN_PORT_CAP_100G_R4;
	if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
	    HAS_MODE(100000baseLR2_ER2_FR2_Full))
		modes |= FUN_PORT_CAP_100G_R2;

	return modes;
#undef HAS_MODE
}
279
fun_speed_to_link_mode(unsigned int speed)280 static u64 fun_speed_to_link_mode(unsigned int speed)
281 {
282 switch (speed) {
283 case SPEED_100000:
284 return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
285 case SPEED_50000:
286 return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
287 case SPEED_40000:
288 return FUN_PORT_CAP_40G_R4;
289 case SPEED_25000:
290 return FUN_PORT_CAP_25G_R;
291 case SPEED_10000:
292 return FUN_PORT_CAP_10G_R;
293 case SPEED_1000:
294 return FUN_PORT_CAP_1000_X;
295 default:
296 return 0;
297 }
298 }
299
/* Program a new advertisement word into the port and, on success, cache
 * it in fp->advertising.  A no-op (returning 0) when nothing changes.
 */
static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
{
	int rc = 0;

	if (new_advert != fp->advertising) {
		rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
					new_advert);
		if (!rc)
			fp->advertising = new_advert;
	}
	return rc;
}
312
313 #define FUN_PORT_CAP_FEC_MASK \
314 (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)
315
/* ethtool set_link_ksettings handler.  Validates the requested settings
 * against the port capabilities and programs a new advertisement word,
 * carrying over the current pause and FEC selections.
 */
static int fun_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ks)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	/* eswitch ports don't support mode changes */
	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;

	if (ks->base.duplex == DUPLEX_HALF)
		return -EINVAL;
	if (ks->base.autoneg == AUTONEG_ENABLE &&
	    !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;

	if (ks->base.autoneg == AUTONEG_ENABLE) {
		if (linkmode_empty(ks->link_modes.advertising))
			return -EINVAL;

		/* advertised modes must be a subset of the port's caps */
		fun_link_modes_to_ethtool(fp->port_caps, supported);
		if (!linkmode_subset(ks->link_modes.advertising, supported))
			return -EINVAL;

		new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
	} else {
		/* forced speed: take all supported link modes at that speed */
		new_advert = fun_speed_to_link_mode(ks->base.speed);
		new_advert &= fp->port_caps;
		if (!new_advert)
			return -EINVAL;
	}
	/* preserve the existing pause and FEC settings */
	new_advert |= fp->advertising &
		      (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);

	return fun_change_advert(fp, new_advert);
}
353
fun_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)354 static void fun_get_pauseparam(struct net_device *netdev,
355 struct ethtool_pauseparam *pause)
356 {
357 const struct funeth_priv *fp = netdev_priv(netdev);
358 u8 active_pause = fp->active_fc;
359
360 pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
361 pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
362 pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
363 }
364
/* ethtool set_pauseparam handler.  Updates the advertised Rx/Tx pause
 * bits via fun_change_advert().  The requested autoneg state must match
 * the port's current autoneg configuration, and each requested pause
 * direction must be supported by the port.
 */
static int fun_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;
	/* Forcing PAUSE settings with AN enabled is unsupported. */
	if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EOPNOTSUPP;
	if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;
	if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
		return -EINVAL;
	if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
		return -EINVAL;

	/* rebuild the pause bits, keeping everything else advertised */
	new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
	if (pause->tx_pause)
		new_advert |= FUN_PORT_CAP_TX_PAUSE;
	if (pause->rx_pause)
		new_advert |= FUN_PORT_CAP_RX_PAUSE;

	return fun_change_advert(fp, new_advert);
}
391
fun_restart_an(struct net_device * netdev)392 static int fun_restart_an(struct net_device *netdev)
393 {
394 struct funeth_priv *fp = netdev_priv(netdev);
395
396 if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
397 return -EOPNOTSUPP;
398
399 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
400 FUN_PORT_CAP_AUTONEG);
401 }
402
fun_set_phys_id(struct net_device * netdev,enum ethtool_phys_id_state state)403 static int fun_set_phys_id(struct net_device *netdev,
404 enum ethtool_phys_id_state state)
405 {
406 struct funeth_priv *fp = netdev_priv(netdev);
407 unsigned int beacon;
408
409 if (fp->port_caps & FUN_PORT_CAP_VPORT)
410 return -EOPNOTSUPP;
411 if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
412 return -EOPNOTSUPP;
413
414 beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
415 FUN_PORT_LED_BEACON_OFF;
416 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
417 }
418
fun_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)419 static void fun_get_drvinfo(struct net_device *netdev,
420 struct ethtool_drvinfo *info)
421 {
422 const struct funeth_priv *fp = netdev_priv(netdev);
423
424 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
425 strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
426 }
427
fun_get_msglevel(struct net_device * netdev)428 static u32 fun_get_msglevel(struct net_device *netdev)
429 {
430 const struct funeth_priv *fp = netdev_priv(netdev);
431
432 return fp->msg_enable;
433 }
434
/* ethtool set_msglevel handler: store the new message-enable mask. */
static void fun_set_msglevel(struct net_device *netdev, u32 value)
{
	struct funeth_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}
441
/* Length of the register dump produced by fun_get_regs(): the NVMe
 * controller register block up to and including the 64-bit ACQ register.
 */
static int fun_get_regs_len(struct net_device *dev)
{
	return NVME_REG_ACQ + sizeof(u64);
}
446
/* ethtool get_regs handler.  Snapshots the NVMe controller registers from
 * the device BAR into @buf at their architectural offsets; 64-bit
 * registers use readq, 32-bit ones readl.  @buf must be at least
 * fun_get_regs_len() bytes.
 */
static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	void __iomem *bar = fp->fdev->bar;

	regs->version = 0;
	*(u64 *)(buf + NVME_REG_CAP)   = readq(bar + NVME_REG_CAP);
	*(u32 *)(buf + NVME_REG_VS)    = readl(bar + NVME_REG_VS);
	*(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
	*(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
	*(u32 *)(buf + NVME_REG_CC)    = readl(bar + NVME_REG_CC);
	*(u32 *)(buf + NVME_REG_CSTS)  = readl(bar + NVME_REG_CSTS);
	*(u32 *)(buf + NVME_REG_AQA)   = readl(bar + NVME_REG_AQA);
	*(u64 *)(buf + NVME_REG_ASQ)   = readq(bar + NVME_REG_ASQ);
	*(u64 *)(buf + NVME_REG_ACQ)   = readq(bar + NVME_REG_ACQ);
}
464
fun_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * coal,struct kernel_ethtool_coalesce * kcoal,struct netlink_ext_ack * ext_ack)465 static int fun_get_coalesce(struct net_device *netdev,
466 struct ethtool_coalesce *coal,
467 struct kernel_ethtool_coalesce *kcoal,
468 struct netlink_ext_ack *ext_ack)
469 {
470 const struct funeth_priv *fp = netdev_priv(netdev);
471
472 coal->rx_coalesce_usecs = fp->rx_coal_usec;
473 coal->rx_max_coalesced_frames = fp->rx_coal_count;
474 coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
475 coal->tx_coalesce_usecs = fp->tx_coal_usec;
476 coal->tx_max_coalesced_frames = fp->tx_coal_count;
477 return 0;
478 }
479
/* ethtool set_coalesce handler.  Validates the requested Rx/Tx interrupt
 * coalescing parameters against the doorbell field limits, caches them,
 * and pushes the derived doorbell values to all active queues.
 */
static int fun_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *coal,
			    struct kernel_ethtool_coalesce *kcoal,
			    struct netlink_ext_ack *ext_ack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_rxq **rxqs;
	unsigned int i, db_val;

	/* each direction needs at least one non-zero parameter and both
	 * must fit in their doorbell fields
	 */
	if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
	    coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
		return -EINVAL;

	/* a timer is required if there's any coalescing */
	if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
	    (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
		return -EINVAL;

	fp->rx_coal_usec = coal->rx_coalesce_usecs;
	fp->rx_coal_count = coal->rx_max_coalesced_frames;
	fp->tx_coal_usec = coal->tx_coalesce_usecs;
	fp->tx_coal_count = coal->tx_max_coalesced_frames;

	/* publish the new CQ doorbell value for readers on other CPUs */
	db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
	WRITE_ONCE(fp->cq_irq_db, db_val);

	/* no active queues to update when the device is down */
	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return 0;

	for (i = 0; i < netdev->real_num_rx_queues; i++)
		WRITE_ONCE(rxqs[i]->irq_db_val, db_val);

	db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
	for (i = 0; i < netdev->real_num_tx_queues; i++)
		WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);

	return 0;
}
523
fun_get_channels(struct net_device * netdev,struct ethtool_channels * chan)524 static void fun_get_channels(struct net_device *netdev,
525 struct ethtool_channels *chan)
526 {
527 chan->max_rx = netdev->num_rx_queues;
528 chan->rx_count = netdev->real_num_rx_queues;
529
530 chan->max_tx = netdev->num_tx_queues;
531 chan->tx_count = netdev->real_num_tx_queues;
532 }
533
fun_set_channels(struct net_device * netdev,struct ethtool_channels * chan)534 static int fun_set_channels(struct net_device *netdev,
535 struct ethtool_channels *chan)
536 {
537 if (!chan->tx_count || !chan->rx_count)
538 return -EINVAL;
539
540 if (chan->tx_count == netdev->real_num_tx_queues &&
541 chan->rx_count == netdev->real_num_rx_queues)
542 return 0;
543
544 if (netif_running(netdev))
545 return fun_change_num_queues(netdev, chan->tx_count,
546 chan->rx_count);
547
548 fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
549 return 0;
550 }
551
/* ethtool get_ringparam handler: report current and maximum queue depths
 * and fixed ring attributes (Rx buffer length, CQE size).
 */
static void fun_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *extack)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int max_depth = fp->fdev->q_depth;

	/* We size CQs to be twice the RQ depth so max RQ depth is half the
	 * max queue depth.
	 */
	ring->rx_max_pending = max_depth / 2;
	ring->tx_max_pending = max_depth;

	ring->rx_pending = fp->rq_depth;
	ring->tx_pending = fp->sq_depth;

	kring->rx_buf_len = PAGE_SIZE;
	kring->cqe_size = FUNETH_CQE_SIZE;
}
572
/* ethtool set_ringparam handler.  Validates the requested depths
 * (powers of 2, at least FUNETH_MIN_QDEPTH, no mini/jumbo rings), and if
 * the interface is up replaces the queues before committing the new
 * depths to the driver state.
 */
static int fun_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kring,
			     struct netlink_ext_ack *extack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* queue depths must be powers-of-2 */
	if (!is_power_of_2(ring->rx_pending) ||
	    !is_power_of_2(ring->tx_pending))
		return -EINVAL;

	if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
	    ring->tx_pending < FUNETH_MIN_QDEPTH)
		return -EINVAL;

	if (fp->sq_depth == ring->tx_pending &&
	    fp->rq_depth == ring->rx_pending)
		return 0;

	if (netif_running(netdev)) {
		/* CQs are sized at twice the RQ depth, see fun_get_ringparam */
		struct fun_qset req = {
			.cq_depth = 2 * ring->rx_pending,
			.rq_depth = ring->rx_pending,
			.sq_depth = ring->tx_pending
		};

		rc = fun_replace_queues(netdev, &req, extack);
		if (rc)
			return rc;
	}

	/* commit only after any live queue replacement succeeded */
	fp->sq_depth = ring->tx_pending;
	fp->rq_depth = ring->rx_pending;
	fp->cq_depth = 2 * fp->rq_depth;
	return 0;
}
614
/* ethtool get_sset_count handler.  The ETH_SS_STATS set holds one row of
 * counters per Tx/Rx/XDP queue plus one totals row per queue type (hence
 * "+ 1"), the TLS counters, and the MAC Rx/Tx counters when the port
 * exposes hardware stats.
 */
static int fun_get_sset_count(struct net_device *dev, int sset)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	int n;

	switch (sset) {
	case ETH_SS_STATS:
		n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
		    (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
		    (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
		    ARRAY_SIZE(tls_stat_names);
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			n += ARRAY_SIZE(mac_tx_stat_names) +
			     ARRAY_SIZE(mac_rx_stat_names);
		}
		return n;
	default:
		break;
	}
	return 0;
}
636
/* ethtool get_strings handler.  Emits stat names in the same layout that
 * fun_get_ethtool_stats() writes values: optional MAC stats, then per-queue
 * names suffixed with "[i]" followed by one unsuffixed totals row for each
 * of Tx, XDP, and Rx, and finally the TLS counters.
 */
static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int i, j;
	u8 *p = data;

	switch (sset) {
	case ETH_SS_STATS:
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
			p += sizeof(mac_tx_stat_names);
			memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
			p += sizeof(mac_rx_stat_names);
		}

		for (i = 0; i < netdev->real_num_tx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
						i);
		}
		/* totals row: plain names without a queue index */
		for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
			ethtool_puts(&p, txq_stat_names[j]);

		for (i = 0; i < fp->num_xdpqs; i++) {
			for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]",
						xdpq_stat_names[j], i);
		}
		for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
			ethtool_puts(&p, xdpq_stat_names[j]);

		for (i = 0; i < netdev->real_num_rx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
						i);
		}
		for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
			ethtool_puts(&p, rxq_stat_names[j]);

		for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
			ethtool_puts(&p, tls_stat_names[j]);
		break;
	default:
		break;
	}
}
683
/* Copy the port's MAC Tx stats followed by its Rx stats into @data,
 * converting each from big-endian.  Emission order must match
 * mac_tx_stat_names/mac_rx_stat_names exactly.  Tx counters are stored in
 * fp->stats after the Rx block, hence the PORT_MAC_RX_STATS_MAX offset.
 * Returns the advanced output pointer.
 */
static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
{
#define TX_STAT(s) \
	*data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])

	TX_STAT(etherStatsOctets);
	TX_STAT(etherStatsPkts);
	TX_STAT(VLANTransmittedOK);
	TX_STAT(ifOutUcastPkts);
	TX_STAT(ifOutMulticastPkts);
	TX_STAT(ifOutBroadcastPkts);
	TX_STAT(ifOutErrors);
	TX_STAT(CBFCPAUSEFramesTransmitted_0);
	TX_STAT(CBFCPAUSEFramesTransmitted_1);
	TX_STAT(CBFCPAUSEFramesTransmitted_2);
	TX_STAT(CBFCPAUSEFramesTransmitted_3);
	TX_STAT(CBFCPAUSEFramesTransmitted_4);
	TX_STAT(CBFCPAUSEFramesTransmitted_5);
	TX_STAT(CBFCPAUSEFramesTransmitted_6);
	TX_STAT(CBFCPAUSEFramesTransmitted_7);
	TX_STAT(CBFCPAUSEFramesTransmitted_8);
	TX_STAT(CBFCPAUSEFramesTransmitted_9);
	TX_STAT(CBFCPAUSEFramesTransmitted_10);
	TX_STAT(CBFCPAUSEFramesTransmitted_11);
	TX_STAT(CBFCPAUSEFramesTransmitted_12);
	TX_STAT(CBFCPAUSEFramesTransmitted_13);
	TX_STAT(CBFCPAUSEFramesTransmitted_14);
	TX_STAT(CBFCPAUSEFramesTransmitted_15);

#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])

	RX_STAT(etherStatsOctets);
	RX_STAT(etherStatsPkts);
	RX_STAT(VLANReceivedOK);
	RX_STAT(ifInUcastPkts);
	RX_STAT(ifInMulticastPkts);
	RX_STAT(ifInBroadcastPkts);
	RX_STAT(etherStatsDropEvents);
	RX_STAT(ifInErrors);
	RX_STAT(aAlignmentErrors);
	RX_STAT(CBFCPAUSEFramesReceived_0);
	RX_STAT(CBFCPAUSEFramesReceived_1);
	RX_STAT(CBFCPAUSEFramesReceived_2);
	RX_STAT(CBFCPAUSEFramesReceived_3);
	RX_STAT(CBFCPAUSEFramesReceived_4);
	RX_STAT(CBFCPAUSEFramesReceived_5);
	RX_STAT(CBFCPAUSEFramesReceived_6);
	RX_STAT(CBFCPAUSEFramesReceived_7);
	RX_STAT(CBFCPAUSEFramesReceived_8);
	RX_STAT(CBFCPAUSEFramesReceived_9);
	RX_STAT(CBFCPAUSEFramesReceived_10);
	RX_STAT(CBFCPAUSEFramesReceived_11);
	RX_STAT(CBFCPAUSEFramesReceived_12);
	RX_STAT(CBFCPAUSEFramesReceived_13);
	RX_STAT(CBFCPAUSEFramesReceived_14);
	RX_STAT(CBFCPAUSEFramesReceived_15);

	return data;

#undef TX_STAT
#undef RX_STAT
}
746
/* ethtool get_stats handler.  Layout matches fun_get_strings(): optional
 * MAC stats, then for each queue type one row of counters per queue
 * followed by a totals row, then the TLS counters.  For each queue type,
 * 'totals' points at that type's totals row and ADD_STAT accumulates into
 * it while writing the per-queue values.
 */
static void fun_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_txq_stats txs;
	struct funeth_rxq_stats rxs;
	struct funeth_txq **xdpqs;
	struct funeth_rxq **rxqs;
	unsigned int i, start;
	u64 *totals, *tot;

	if (fp->port_caps & FUN_PORT_CAP_STATS)
		data = get_mac_stats(fp, data);

	/* no queues when the device is down; remaining stats stay 0 */
	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return;

#define ADD_STAT(cnt) do { \
	*data = (cnt); *tot++ += *data++; \
} while (0)

	/* Tx queues */
	totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		tot = totals;

		/* snapshot this queue's counters consistently */
		FUN_QSTAT_READ(fp->txqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_cso);
		ADD_STAT(txs.tx_tso);
		ADD_STAT(txs.tx_encap_tso);
		ADD_STAT(txs.tx_uso);
		ADD_STAT(txs.tx_more);
		ADD_STAT(txs.tx_nstops);
		ADD_STAT(txs.tx_nrestarts);
		ADD_STAT(txs.tx_map_err);
		ADD_STAT(txs.tx_tls_pkts);
		ADD_STAT(txs.tx_tls_bytes);
		ADD_STAT(txs.tx_tls_fallback);
		ADD_STAT(txs.tx_tls_drops);
	}
	data += ARRAY_SIZE(txq_stat_names);	/* skip the totals row */

	/* XDP Tx queues */
	xdpqs = rtnl_dereference(fp->xdpqs);
	totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);

	for (i = 0; i < fp->num_xdpqs; i++) {
		tot = totals;

		FUN_QSTAT_READ(xdpqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_xdp_full);
		ADD_STAT(txs.tx_map_err);
	}
	data += ARRAY_SIZE(xdpq_stat_names);

	/* Rx queues */
	totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		tot = totals;

		FUN_QSTAT_READ(rxqs[i], start, rxs);

		ADD_STAT(rxs.rx_pkts);
		ADD_STAT(rxs.rx_bytes);
		ADD_STAT(rxs.rx_cso);
		ADD_STAT(rxs.gro_pkts);
		ADD_STAT(rxs.gro_merged);
		ADD_STAT(rxs.xdp_tx);
		ADD_STAT(rxs.xdp_redir);
		ADD_STAT(rxs.xdp_drops);
		ADD_STAT(rxs.rx_bufs);
		ADD_STAT(rxs.rx_page_alloc);
		/* "rx_drops" combines memory drops and XDP errors */
		ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
		ADD_STAT(rxs.rx_budget);
		ADD_STAT(rxs.rx_map_err);
	}
	data += ARRAY_SIZE(rxq_stat_names);
#undef ADD_STAT

	*data++ = atomic64_read(&fp->tx_tls_add);
	*data++ = atomic64_read(&fp->tx_tls_del);
	*data++ = atomic64_read(&fp->tx_tls_resync);
}
839
840 #define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
841 #define TX_STAT(fp, s) \
842 be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
843 #define FEC_STAT(fp, s) \
844 be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
845 PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])
846
fun_get_pause_stats(struct net_device * netdev,struct ethtool_pause_stats * stats)847 static void fun_get_pause_stats(struct net_device *netdev,
848 struct ethtool_pause_stats *stats)
849 {
850 const struct funeth_priv *fp = netdev_priv(netdev);
851
852 if (!(fp->port_caps & FUN_PORT_CAP_STATS))
853 return;
854
855 stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
856 stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
857 }
858
/* ethtool standard IEEE 802.3 MAC stats handler; values come straight
 * from the device's MAC counter block when the port exposes HW stats.
 */
static void fun_get_802_3_stats(struct net_device *netdev,
				struct ethtool_eth_mac_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
	stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
	stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
	stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
	stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
	stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
	stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
}
875
fun_get_802_3_ctrl_stats(struct net_device * netdev,struct ethtool_eth_ctrl_stats * stats)876 static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
877 struct ethtool_eth_ctrl_stats *stats)
878 {
879 const struct funeth_priv *fp = netdev_priv(netdev);
880
881 if (!(fp->port_caps & FUN_PORT_CAP_STATS))
882 return;
883
884 stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
885 stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
886 }
887
/* ethtool RMON stats handler.  Reports error counters and the Rx/Tx
 * frame-size histograms; hist[] indices correspond one-to-one with the
 * buckets in rmon_ranges below.
 */
static void fun_get_rmon_stats(struct net_device *netdev,
			       struct ethtool_rmon_stats *stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	static const struct ethtool_rmon_hist_range rmon_ranges[] = {
		{   64,    64 },
		{   65,   127 },
		{  128,   255 },
		{  256,   511 },
		{  512,  1023 },
		{ 1024,  1518 },
		{ 1519, 32767 },
		{}
	};

	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
	stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
	stats->fragments = RX_STAT(fp, etherStatsFragments);
	stats->jabbers = RX_STAT(fp, etherStatsJabbers);

	stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
	stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);

	stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
	stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);

	*ranges = rmon_ranges;
}
931
fun_get_fec_stats(struct net_device * netdev,struct ethtool_fec_stats * stats)932 static void fun_get_fec_stats(struct net_device *netdev,
933 struct ethtool_fec_stats *stats)
934 {
935 const struct funeth_priv *fp = netdev_priv(netdev);
936
937 if (!(fp->port_caps & FUN_PORT_CAP_STATS))
938 return;
939
940 stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
941 stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
942 }
943
944 #undef RX_STAT
945 #undef TX_STAT
946 #undef FEC_STAT
947
/* ethtool get_rxnfc handler: only ETHTOOL_GRXRINGS is implemented,
 * reporting the number of active Rx queues.
 */
static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	if (cmd->cmd != ETHTOOL_GRXRINGS)
		return -EOPNOTSUPP;

	cmd->data = netdev->real_num_rx_queues;
	return 0;
}
960
/* ethtool set_rxnfc handler: nothing is configurable here, so every
 * request succeeds as a no-op.
 */
static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	return 0;
}
965
/* Return the number of entries in the port's RSS indirection table. */
static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return fp->indir_table_nentries;
}
972
/* Return the size in bytes of the port's RSS hash key. */
static u32 fun_get_rxfh_key_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return sizeof(fp->rss_key);
}
979
fun_get_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh)980 static int fun_get_rxfh(struct net_device *netdev,
981 struct ethtool_rxfh_param *rxfh)
982 {
983 const struct funeth_priv *fp = netdev_priv(netdev);
984
985 if (!fp->rss_cfg)
986 return -EOPNOTSUPP;
987
988 if (rxfh->indir)
989 memcpy(rxfh->indir, fp->indir_table,
990 sizeof(u32) * fp->indir_table_nentries);
991
992 if (rxfh->key)
993 memcpy(rxfh->key, fp->rss_key, sizeof(fp->rss_key));
994
995 rxfh->hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
996 ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
997
998 return 0;
999 }
1000
/* ethtool -X callback: update the RSS indirection table, hash key, and/or
 * hash function. Any field the caller leaves NULL (or sets to
 * ETH_RSS_HASH_NO_CHANGE) keeps its current value. Returns -EOPNOTSUPP if
 * the device has no RSS configuration, -EINVAL for an unsupported hash
 * function, or the error from reconfiguring a running port.
 */
static int fun_set_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	/* Fall back to the currently stored table/key for fields the caller
	 * did not supply, so a partial update reprograms a full config.
	 */
	const u32 *rss_indir = rxfh->indir ? rxfh->indir : fp->indir_table;
	const u8 *rss_key = rxfh->key ? rxfh->key : fp->rss_key;
	enum fun_eth_hash_alg algo;

	if (!fp->rss_cfg)
		return -EOPNOTSUPP;

	if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
		algo = fp->hash_algo;
	else if (rxfh->hfunc == ETH_RSS_HASH_CRC32)
		algo = FUN_ETH_RSS_ALG_CRC32;
	else if (rxfh->hfunc == ETH_RSS_HASH_TOP)
		algo = FUN_ETH_RSS_ALG_TOEPLITZ;
	else
		return -EINVAL;

	/* If the port is enabled try to reconfigure RSS and keep the new
	 * settings if successful. If it is down we update the RSS settings
	 * and apply them at the next UP time.
	 */
	if (netif_running(netdev)) {
		int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
					FUN_ADMIN_SUBOP_MODIFY);
		if (rc)
			return rc;
	}

	/* Commit to software state only after the device accepted the
	 * configuration (or the port was down).
	 */
	fp->hash_algo = algo;
	if (rxfh->key)
		memcpy(fp->rss_key, rxfh->key, sizeof(fp->rss_key));
	if (rxfh->indir)
		memcpy(fp->indir_table, rxfh->indir,
		       sizeof(u32) * fp->indir_table_nentries);
	return 0;
}
1041
/* Report timestamping capabilities: RX hardware timestamps for all packets,
 * software TX timestamps only (no hardware TX timestamping mode besides OFF).
 */
static int fun_get_ts_info(struct net_device *netdev,
			   struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}
1052
to_ethtool_fec(unsigned int fun_fec)1053 static unsigned int to_ethtool_fec(unsigned int fun_fec)
1054 {
1055 unsigned int fec = 0;
1056
1057 if (fun_fec == FUN_PORT_FEC_NA)
1058 fec |= ETHTOOL_FEC_NONE;
1059 if (fun_fec & FUN_PORT_FEC_OFF)
1060 fec |= ETHTOOL_FEC_OFF;
1061 if (fun_fec & FUN_PORT_FEC_RS)
1062 fec |= ETHTOOL_FEC_RS;
1063 if (fun_fec & FUN_PORT_FEC_FC)
1064 fec |= ETHTOOL_FEC_BASER;
1065 if (fun_fec & FUN_PORT_FEC_AUTO)
1066 fec |= ETHTOOL_FEC_AUTO;
1067 return fec;
1068 }
1069
/* ethtool --show-fec callback: query the device for its FEC state.
 * The returned 64-bit value packs the active FEC in the low byte and the
 * configured/advertised FEC modes in the bits above it — presumably per the
 * device's admin-command ABI (defined outside this file).
 */
static int fun_get_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 fec_data;
	int rc;

	rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
	if (rc)
		return rc;

	fec->active_fec = to_ethtool_fec(fec_data & 0xff);
	fec->fec = to_ethtool_fec(fec_data >> 8);
	return 0;
}
1085
fun_set_fecparam(struct net_device * netdev,struct ethtool_fecparam * fec)1086 static int fun_set_fecparam(struct net_device *netdev,
1087 struct ethtool_fecparam *fec)
1088 {
1089 struct funeth_priv *fp = netdev_priv(netdev);
1090 u64 fec_mode;
1091
1092 switch (fec->fec) {
1093 case ETHTOOL_FEC_AUTO:
1094 fec_mode = FUN_PORT_FEC_AUTO;
1095 break;
1096 case ETHTOOL_FEC_OFF:
1097 if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
1098 return -EINVAL;
1099 fec_mode = FUN_PORT_FEC_OFF;
1100 break;
1101 case ETHTOOL_FEC_BASER:
1102 if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
1103 return -EINVAL;
1104 fec_mode = FUN_PORT_FEC_FC;
1105 break;
1106 case ETHTOOL_FEC_RS:
1107 if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
1108 return -EINVAL;
1109 fec_mode = FUN_PORT_FEC_RS;
1110 break;
1111 default:
1112 return -EINVAL;
1113 }
1114
1115 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
1116 }
1117
/* Read a page of the port's transceiver module EEPROM via a synchronous
 * admin command. Virtual ports have no physical module and are rejected.
 * Returns the number of bytes read on success or a negative errno.
 */
static int fun_get_port_module_page(struct net_device *netdev,
				    const struct ethtool_module_eeprom *req,
				    struct netlink_ext_ack *extack)
{
	/* Request and response share one buffer; the response overwrites the
	 * request in place when the command completes.
	 */
	union {
		struct fun_admin_port_req req;
		struct fun_admin_port_xcvr_read_rsp rsp;
	} cmd;
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	if (fp->port_caps & FUN_PORT_CAP_VPORT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Specified port is virtual, only physical ports have modules");
		return -EOPNOTSUPP;
	}

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
						    sizeof(cmd.req));
	cmd.req.u.xcvr_read =
		FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
						  req->bank, req->page,
						  req->offset, req->length,
						  req->i2c_address);
	rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	/* req->length bytes were requested; copy that many out of the
	 * response payload.
	 */
	memcpy(req->data, cmd.rsp.data, req->length);
	return req->length;
}
1150
/* ethtool operations supported by funeth netdevices. */
static const struct ethtool_ops fun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link_ksettings  = fun_get_link_ksettings,
	.set_link_ksettings  = fun_set_link_ksettings,
	.set_phys_id         = fun_set_phys_id,
	.get_drvinfo         = fun_get_drvinfo,
	.get_msglevel        = fun_get_msglevel,
	.set_msglevel        = fun_set_msglevel,
	.get_regs_len        = fun_get_regs_len,
	.get_regs            = fun_get_regs,
	.get_link	     = ethtool_op_get_link,
	.get_coalesce        = fun_get_coalesce,
	.set_coalesce        = fun_set_coalesce,
	.get_ts_info         = fun_get_ts_info,
	.get_ringparam       = fun_get_ringparam,
	.set_ringparam       = fun_set_ringparam,
	.get_sset_count      = fun_get_sset_count,
	.get_strings         = fun_get_strings,
	.get_ethtool_stats   = fun_get_ethtool_stats,
	.get_rxnfc	     = fun_get_rxnfc,
	.set_rxnfc           = fun_set_rxnfc,
	.get_rxfh_indir_size = fun_get_rxfh_indir_size,
	.get_rxfh_key_size   = fun_get_rxfh_key_size,
	.get_rxfh            = fun_get_rxfh,
	.set_rxfh            = fun_set_rxfh,
	.get_channels        = fun_get_channels,
	.set_channels        = fun_set_channels,
	.get_fecparam	     = fun_get_fecparam,
	.set_fecparam	     = fun_set_fecparam,
	.get_pauseparam      = fun_get_pauseparam,
	.set_pauseparam      = fun_set_pauseparam,
	.nway_reset          = fun_restart_an,
	.get_pause_stats     = fun_get_pause_stats,
	.get_fec_stats       = fun_get_fec_stats,
	.get_eth_mac_stats   = fun_get_802_3_stats,
	.get_eth_ctrl_stats  = fun_get_802_3_ctrl_stats,
	.get_rmon_stats      = fun_get_rmon_stats,
	.get_module_eeprom_by_page = fun_get_port_module_page,
};
1191
/* Attach the funeth ethtool operations to a netdevice. Called during
 * netdevice setup (the only non-static symbol in this file).
 */
void fun_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &fun_ethtool_ops;
}
1196