1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <linux/pci.h>
9 #include <linux/ethtool.h>
10 #include <linux/stddef.h>
11 #include <linux/etherdevice.h>
12 #include <linux/log2.h>
13 #include <linux/net_tstamp.h>
14 #include <linux/linkmode.h>
15
16 #include "otx2_common.h"
17 #include "otx2_ptp.h"
18 #include <cgx_fw_if.h>
19
20 #define DRV_NAME "rvu-nicpf"
21 #define DRV_VF_NAME "rvu-nicvf"
22
/* Name/index pair describing one ethtool statistic; index is the
 * word offset into the corresponding stats structure.
 */
struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};
27
28 /* HW device stats */
29 #define OTX2_DEV_STAT(stat) { \
30 .name = #stat, \
31 .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
32 }
33
/* Selector for link-mode mask filling: supported vs advertised.
 * NOTE(review): consumers are below/out of view — presumably used by
 * otx2_get_fec_info()/link ksettings helpers; confirm.
 */
enum link_mode {
	OTX2_MODE_SUPPORTED,
	OTX2_MODE_ADVERTISED
};
38
/* Hardware device statistics exposed via ethtool -S; names must stay
 * in sync with the fields of struct otx2_dev_stats.
 */
static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};
48
49 /* Driver level stats */
50 #define OTX2_DRV_STAT(stat) { \
51 .name = #stat, \
52 .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
53 }
54
/* Software (driver-maintained) error counters exposed via ethtool -S. */
static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};
63
/* Per-queue stats: index is the u64 slot within the queue stats
 * structure (bytes first, then frames).
 */
static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
72
73 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
74
otx2_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)75 static void otx2_get_drvinfo(struct net_device *netdev,
76 struct ethtool_drvinfo *info)
77 {
78 struct otx2_nic *pfvf = netdev_priv(netdev);
79
80 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
81 strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
82 }
83
/* Emit per-queue stat name strings for one qset into the ethtool
 * strings buffer: "rxqN: bytes/frames" for every Rx queue, then
 * "txqN:"/"txq_qosN:" for every Tx queue (QoS SQs come after the
 * regular ones and are numbered from zero again).
 */
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
{
	int base = qset * pfvf->hw.rx_queues;
	int q, s;

	for (q = 0; q < pfvf->hw.rx_queues; q++) {
		for (s = 0; s < otx2_n_queue_stats; s++)
			ethtool_sprintf(data, "rxq%d: %s", base + q,
					otx2_queue_stats[s].name);
	}

	for (q = 0; q < otx2_get_total_tx_queues(pfvf); q++) {
		for (s = 0; s < otx2_n_queue_stats; s++) {
			if (q < pfvf->hw.non_qos_queues)
				ethtool_sprintf(data, "txq%d: %s",
						base + q,
						otx2_queue_stats[s].name);
			else
				ethtool_sprintf(data, "txq_qos%d: %s",
						base + q -
						pfvf->hw.non_qos_queues,
						otx2_queue_stats[s].name);
		}
	}
}
106
/* ethtool -S: emit statistic names in exactly the same order that
 * otx2_get_ethtool_stats() emits the values.
 */
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++)
		ethtool_puts(&data, otx2_dev_stats[stats].name);

	for (stats = 0; stats < otx2_n_drv_stats; stats++)
		ethtool_puts(&data, otx2_drv_stats[stats].name);

	otx2_get_qset_strings(pfvf, &data, 0);

	/* CGX MAC counters exist only on silicon without an RPM block */
	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
		for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++)
			ethtool_sprintf(&data, "cgx_rxstat%d: ", stats);

		for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++)
			ethtool_sprintf(&data, "cgx_txstat%d: ", stats);
	}

	ethtool_puts(&data, "reset_count");
	ethtool_puts(&data, "Fec Corrected Errors: ");
	ethtool_puts(&data, "Fec Uncorrected Errors: ");
}
135
/* Copy per-queue Rx then Tx counters into the ethtool stats buffer,
 * advancing the caller's cursor (*data). Queues whose stats update
 * fails contribute zeros so the output layout stays aligned with the
 * strings emitted by otx2_get_qset_strings().
 */
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			/* keep slot positions: emit zeros on failure */
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}
165
/* Ask firmware to refresh the PHY FEC statistics (read back later via
 * otx2_get_fwdata()). Returns 0 on success, -ENOMEM if the mailbox
 * message could not be allocated or the mailbox exchange failed.
 */
static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;
	int rc = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
	if (!req)
		goto end;

	if (!otx2_sync_mbox_msg(&pfvf->mbox))
		rc = 0;
end:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
182
183 /* Get device and per queue statistics */
otx2_get_ethtool_stats(struct net_device * netdev,struct ethtool_stats * stats,u64 * data)184 static void otx2_get_ethtool_stats(struct net_device *netdev,
185 struct ethtool_stats *stats, u64 *data)
186 {
187 struct otx2_nic *pfvf = netdev_priv(netdev);
188 u64 fec_corr_blks, fec_uncorr_blks;
189 struct cgx_fw_data *rsp;
190 int stat;
191
192 otx2_get_dev_stats(pfvf);
193 for (stat = 0; stat < otx2_n_dev_stats; stat++)
194 *(data++) = ((u64 *)&pfvf->hw.dev_stats)
195 [otx2_dev_stats[stat].index];
196
197 for (stat = 0; stat < otx2_n_drv_stats; stat++)
198 *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
199 [otx2_drv_stats[stat].index]);
200
201 otx2_get_qset_stats(pfvf, stats, &data);
202
203 if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
204 otx2_update_lmac_stats(pfvf);
205 for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
206 *(data++) = pfvf->hw.cgx_rx_stats[stat];
207 for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
208 *(data++) = pfvf->hw.cgx_tx_stats[stat];
209 }
210
211 *(data++) = pfvf->reset_count;
212
213 fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
214 fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
215
216 rsp = otx2_get_fwdata(pfvf);
217 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
218 !otx2_get_phy_fec_stats(pfvf)) {
219 /* Fetch fwdata again because it's been recently populated with
220 * latest PHY FEC stats.
221 */
222 rsp = otx2_get_fwdata(pfvf);
223 if (!IS_ERR(rsp)) {
224 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
225
226 if (pfvf->linfo.fec == OTX2_FEC_BASER) {
227 fec_corr_blks = p->brfec_corr_blks;
228 fec_uncorr_blks = p->brfec_uncorr_blks;
229 } else {
230 fec_corr_blks = p->rsfec_corr_cws;
231 fec_uncorr_blks = p->rsfec_uncorr_cws;
232 }
233 }
234 }
235
236 *(data++) = fec_corr_blks;
237 *(data++) = fec_uncorr_blks;
238 }
239
/* Return the number of ETH_SS_STATS strings/values we expose.
 * NOTE: also kicks off a refresh of the LMAC FEC counters as a side
 * effect so the subsequent stats read returns fresh values.
 */
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int qstats_count, mac_stats = 0;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (pfvf->hw.rx_queues + otx2_get_total_tx_queues(pfvf));
	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
		mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
	otx2_update_lmac_fec_stats(pfvf);

	/* +1 for "reset_count"; OTX2_FEC_STATS_CNT for the FEC pair */
	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
	       mac_stats + OTX2_FEC_STATS_CNT + 1;
}
257
258 /* Get no of queues device supports and current queue count */
otx2_get_channels(struct net_device * dev,struct ethtool_channels * channel)259 static void otx2_get_channels(struct net_device *dev,
260 struct ethtool_channels *channel)
261 {
262 struct otx2_nic *pfvf = netdev_priv(dev);
263
264 channel->max_rx = pfvf->hw.max_queues;
265 channel->max_tx = pfvf->hw.max_queues;
266
267 channel->rx_count = pfvf->hw.rx_queues;
268 channel->tx_count = pfvf->hw.tx_queues;
269 }
270
271 /* Set no of Tx, Rx queues to be used */
otx2_set_channels(struct net_device * dev,struct ethtool_channels * channel)272 static int otx2_set_channels(struct net_device *dev,
273 struct ethtool_channels *channel)
274 {
275 struct otx2_nic *pfvf = netdev_priv(dev);
276 bool if_up = netif_running(dev);
277 int err, qos_txqs;
278
279 if (!channel->rx_count || !channel->tx_count)
280 return -EINVAL;
281
282 if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
283 netdev_err(dev,
284 "Receive queues are in use by TC police action\n");
285 return -EINVAL;
286 }
287
288 if (if_up)
289 dev->netdev_ops->ndo_stop(dev);
290
291 qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
292 OTX2_QOS_MAX_LEAF_NODES);
293
294 err = otx2_set_real_num_queues(dev, channel->tx_count + qos_txqs,
295 channel->rx_count);
296 if (err)
297 return err;
298
299 pfvf->hw.rx_queues = channel->rx_count;
300 pfvf->hw.tx_queues = channel->tx_count;
301 if (pfvf->xdp_prog)
302 pfvf->hw.xdp_queues = channel->rx_count;
303
304 if (if_up)
305 err = dev->netdev_ops->ndo_open(dev);
306
307 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
308 pfvf->hw.tx_queues, pfvf->hw.rx_queues);
309
310 return err;
311 }
312
/* ethtool -a: query current Rx/Tx pause frame configuration from the
 * MAC via mailbox. LBK/SDP devices are internal links with no pause
 * support, so the request is silently skipped for them.
 */
static void otx2_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_pause_frm_cfg *req, *rsp;

	if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pause_frm_cfg *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			mutex_unlock(&pfvf->mbox.lock);
			return;
		}

		pause->rx_pause = rsp->rx_pause;
		pause->tx_pause = rsp->tx_pause;
	}
	mutex_unlock(&pfvf->mbox.lock);
}
342
otx2_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)343 static int otx2_set_pauseparam(struct net_device *netdev,
344 struct ethtool_pauseparam *pause)
345 {
346 struct otx2_nic *pfvf = netdev_priv(netdev);
347
348 if (pause->autoneg)
349 return -EOPNOTSUPP;
350
351 if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
352 return -EOPNOTSUPP;
353
354 if (pause->rx_pause)
355 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
356 else
357 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
358
359 if (pause->tx_pause)
360 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
361 else
362 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
363
364 return otx2_config_pause_frm(pfvf);
365 }
366
otx2_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)367 static void otx2_get_ringparam(struct net_device *netdev,
368 struct ethtool_ringparam *ring,
369 struct kernel_ethtool_ringparam *kernel_ring,
370 struct netlink_ext_ack *extack)
371 {
372 struct otx2_nic *pfvf = netdev_priv(netdev);
373 struct otx2_qset *qs = &pfvf->qset;
374
375 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
376 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
377 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
378 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
379 kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
380 kernel_ring->cqe_size = pfvf->hw.xqe_size;
381 }
382
/* ethtool -G: validate and apply ring sizes, rx buffer length and CQE
 * size. Ring counts are rounded to the nearest supported power-like
 * size by Q_COUNT(Q_SIZE(...)); the interface is restarted if it was
 * running and anything actually changed.
 */
static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u32 rx_buf_len = kernel_ring->rx_buf_len;
	u32 old_rx_buf_len = pfvf->hw.rbuf_len;
	u32 xqe_size = kernel_ring->cqe_size;
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Hardware supports max size of 32k for a receive buffer
	 * and 1536 is typical ethernet frame size.
	 */
	if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
		netdev_err(netdev,
			   "Receive buffer range is 1536 - 32768");
		return -EINVAL;
	}

	if (xqe_size != 128 && xqe_size != 512) {
		netdev_err(netdev,
			   "Completion event size must be 128 or 512");
		return -EINVAL;
	}

	/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M  */
	rx_count = ring->rx_pending;
	/* On some silicon variants a skid or reserved CQEs are
	 * needed to avoid CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count =  pfvf->hw.rq_skid;

	if (ring->rx_pending < 16) {
		netdev_err(netdev,
			   "rx ring size %u invalid, min is 16\n",
			   ring->rx_pending);
		return -EINVAL;
	}

	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* Due pipelining impact minimum 2000 unused SQ CQE's
	 * need to be maintained to avoid CQ overflow, hence the
	 * minimum 4K size.
	 */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	/* Nothing changed after normalization: no restart needed */
	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
	    rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
		return 0;

	if (if_up)
		netdev->netdev_ops->ndo_stop(netdev);

	/* Assigned to the nearest possible exponent. */
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	pfvf->hw.rbuf_len = rx_buf_len;
	pfvf->hw.xqe_size = xqe_size;

	if (if_up)
		return netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
458
otx2_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * cmd,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)459 static int otx2_get_coalesce(struct net_device *netdev,
460 struct ethtool_coalesce *cmd,
461 struct kernel_ethtool_coalesce *kernel_coal,
462 struct netlink_ext_ack *extack)
463 {
464 struct otx2_nic *pfvf = netdev_priv(netdev);
465 struct otx2_hw *hw = &pfvf->hw;
466
467 cmd->rx_coalesce_usecs = hw->cq_time_wait;
468 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
469 cmd->tx_coalesce_usecs = hw->cq_time_wait;
470 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
471 if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
472 OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
473 cmd->use_adaptive_rx_coalesce = 1;
474 cmd->use_adaptive_tx_coalesce = 1;
475 } else {
476 cmd->use_adaptive_rx_coalesce = 0;
477 cmd->use_adaptive_tx_coalesce = 0;
478 }
479
480 return 0;
481 }
482
/* ethtool -C: apply interrupt coalescing. Rx and Tx share the same CQ,
 * so when the user changes only one direction that value wins; if both
 * changed, the minimum is used. Toggling adaptive coalescing off
 * restores the default thresholds.
 */
static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	u8 priv_coalesce_status;
	int qidx;

	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
		netdev_err(netdev,
			   "adaptive-rx should be same as adaptive-tx");
		return -EINVAL;
	}

	/* Check and update coalesce status */
	if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
	    OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
		priv_coalesce_status = 1;
		if (!ec->use_adaptive_rx_coalesce)
			pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
	} else {
		priv_coalesce_status = 0;
		if (ec->use_adaptive_rx_coalesce)
			pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
	}

	/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
	 * so clamp the user given value to the range of 1 to 25usec.
	 */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Max ecount_wait supported is 16bit,
	 * so clamp the user given value to the range of 1 to 64k.
	 */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, NAPI_POLL_WEIGHT);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, NAPI_POLL_WEIGHT);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	/* Reset 'cq_time_wait' and 'cq_ecount_wait' to
	 * default values if coalesce status changed from
	 * 'on' to 'off'.
	 */
	if (priv_coalesce_status &&
	    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
	     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
		hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
		hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	}

	/* Push new thresholds to every interrupt context if running */
	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}
570
/* Report which header fields feed the RSS hash for a given flow type.
 * IPv4/IPv6 SIP/DIP is the baseline whenever RSS keying on IP is
 * enabled; L4 ports and VLAN are added according to flowkey_cfg.
 */
static int otx2_get_rss_hash_opts(struct net_device *dev,
				  struct ethtool_rxfh_fields *nfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_info *rss;

	rss = &pfvf->hw.rss_info;

	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* Mimimum is IPv4 and IPv6, SIP/DIP */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
		nfc->data |= RXH_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
623
/* Configure which header fields feed the RSS hash for a flow type.
 * IP SIP/DIP is mandatory; per-protocol L4 hashing is all-or-nothing
 * (4-tuple or 2-tuple). ESP/AH hashing is constrained by the hardware
 * 40-byte flow key limit, hence the VLAN/IPv4-proto exclusions.
 */
static int otx2_set_rss_hash_opts(struct net_device *dev,
				  const struct ethtool_rxfh_fields *nfc,
				  struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	struct otx2_rss_info *rss;
	u32 rss_cfg;

	rss = &pfvf->hw.rss_info;
	rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Mimimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	if (nfc->data & RXH_VLAN)
		rss_cfg |=  NIX_FLOW_KEY_TYPE_VLAN;
	else
		rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different config for v4 and v6 is not supported.
		 * Both of them have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
				     NIX_FLOW_KEY_TYPE_AH);
			rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
				   NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			/* If VLAN hashing is also requested for ESP then do not
			 * allow because of hardware 40 bytes flow key limit.
			 */
			if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
				netdev_err(pfvf->netdev,
					   "RSS hash of ESP or AH with VLAN is not supported\n");
				return -EOPNOTSUPP;
			}

			rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
			/* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
			 * and ESP SPI+sequence(8 bytes) uses hardware maximum
			 * limit of 40 byte flow key.
			 */
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}
736
/* ethtool -n: ring count query plus ntuple flow rule read-back.
 * Flow rules are only reachable when the interface is running and
 * NETIF_F_NTUPLE is enabled; otherwise -EOPNOTSUPP.
 */
static int otx2_get_rxnfc(struct net_device *dev,
			  struct ethtool_rxnfc *nfc, u32 *rules)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
	bool can_query = netif_running(dev) && ntuple;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		if (can_query) {
			nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
			return 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (can_query)
			return otx2_get_flow(pfvf, nfc, nfc->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (can_query)
			return otx2_get_all_flows(pfvf, nfc, rules);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}
768
/* ethtool -N: insert or delete an ntuple flow rule. Only allowed when
 * the interface is running with NETIF_F_NTUPLE set.
 */
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	/* remember current ntuple state for the flow management code */
	pfvf->flow_cfg->ntuple = ntuple;
	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (netif_running(dev) && ntuple)
			ret = otx2_add_flow(pfvf, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (netif_running(dev) && ntuple)
			ret = otx2_remove_flow(pfvf, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}
791
otx2_get_rxfh_key_size(struct net_device * netdev)792 static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
793 {
794 struct otx2_nic *pfvf = netdev_priv(netdev);
795 struct otx2_rss_info *rss;
796
797 rss = &pfvf->hw.rss_info;
798
799 return sizeof(rss->key);
800 }
801
otx2_get_rxfh_indir_size(struct net_device * dev)802 static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
803 {
804 return MAX_RSS_INDIR_TBL_SIZE;
805 }
806
/* Create an additional RSS context. Only Toeplitz (ETH_RSS_HASH_TOP)
 * is supported; if the caller supplies no indirection table, a default
 * round-robin spread over the Rx queues is generated.
 */
static int otx2_create_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_context *ctx,
			    const struct ethtool_rxfh_param *rxfh,
			    struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_info *rss;
	unsigned int queues;
	u32 *ind_tbl;
	int idx;

	rss = &pfvf->hw.rss_info;
	queues = pfvf->hw.rx_queues;

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;
	ctx->hfunc = ETH_RSS_HASH_TOP;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	ind_tbl = rxfh->indir;
	if (!ind_tbl) {
		/* populate the context's table with the default spread */
		ind_tbl = ethtool_rxfh_context_indir(ctx);
		for (idx = 0; idx < rss->rss_size; idx++)
			ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues);
	}

	otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl);
	return 0;
}
840
otx2_modify_rxfh(struct net_device * dev,struct ethtool_rxfh_context * ctx,const struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)841 static int otx2_modify_rxfh(struct net_device *dev,
842 struct ethtool_rxfh_context *ctx,
843 const struct ethtool_rxfh_param *rxfh,
844 struct netlink_ext_ack *extack)
845 {
846 struct otx2_nic *pfvf = netdev_priv(dev);
847
848 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
849 rxfh->hfunc != ETH_RSS_HASH_TOP)
850 return -EOPNOTSUPP;
851
852 if (!pfvf->hw.rss_info.enable) {
853 netdev_err(dev, "RSS is disabled, cannot change settings\n");
854 return -EIO;
855 }
856
857 if (rxfh->indir)
858 otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir);
859
860 return 0;
861 }
862
/* Delete an RSS context and drop the flow rules attached to it. */
static int otx2_remove_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_context *ctx,
			    u32 rss_context,
			    struct netlink_ext_ack *extack)
{
	struct otx2_nic *priv = netdev_priv(dev);

	if (!priv->hw.rss_info.enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	otx2_rss_ctx_flow_del(priv, rss_context);
	return 0;
}
878
879 /* Configure RSS table and hash key */
otx2_set_rxfh(struct net_device * dev,struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)880 static int otx2_set_rxfh(struct net_device *dev,
881 struct ethtool_rxfh_param *rxfh,
882 struct netlink_ext_ack *extack)
883 {
884 struct otx2_nic *pfvf = netdev_priv(dev);
885 struct otx2_rss_info *rss;
886 int idx;
887
888 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
889 rxfh->hfunc != ETH_RSS_HASH_TOP)
890 return -EOPNOTSUPP;
891
892 rss = &pfvf->hw.rss_info;
893
894 if (!rss->enable) {
895 netdev_err(dev, "RSS is disabled, cannot change settings\n");
896 return -EIO;
897 }
898
899 if (rxfh->key) {
900 memcpy(rss->key, rxfh->key, sizeof(rss->key));
901 otx2_set_rss_key(pfvf);
902 }
903
904 if (rxfh->indir) {
905 for (idx = 0; idx < rss->rss_size; idx++)
906 rss->ind_tbl[idx] = rxfh->indir[idx];
907 }
908 otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
909
910 return 0;
911 }
912
913 /* Get RSS configuration */
otx2_get_rxfh(struct net_device * dev,struct ethtool_rxfh_param * rxfh)914 static int otx2_get_rxfh(struct net_device *dev,
915 struct ethtool_rxfh_param *rxfh)
916 {
917 struct otx2_nic *pfvf = netdev_priv(dev);
918 struct otx2_rss_info *rss;
919 u32 *indir = rxfh->indir;
920 int idx, rx_queues;
921
922 rss = &pfvf->hw.rss_info;
923
924 rxfh->hfunc = ETH_RSS_HASH_TOP;
925 if (!indir)
926 return 0;
927
928 if (!rss->enable) {
929 rx_queues = pfvf->hw.rx_queues;
930 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
931 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
932 return 0;
933 }
934
935 for (idx = 0; idx < rss->rss_size; idx++) {
936 /* Ignore if the rx queue is AF_XDP zero copy enabled */
937 if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
938 continue;
939 indir[idx] = rss->ind_tbl[idx];
940 }
941 if (rxfh->key)
942 memcpy(rxfh->key, rss->key, sizeof(rss->key));
943
944 return 0;
945 }
946
otx2_get_msglevel(struct net_device * netdev)947 static u32 otx2_get_msglevel(struct net_device *netdev)
948 {
949 struct otx2_nic *pfvf = netdev_priv(netdev);
950
951 return pfvf->msg_enable;
952 }
953
/* Set the driver's message-level bitmap. */
static void otx2_set_msglevel(struct net_device *netdev, u32 val)
{
	struct otx2_nic *priv = netdev_priv(netdev);

	priv->msg_enable = val;
}
960
otx2_get_link(struct net_device * netdev)961 static u32 otx2_get_link(struct net_device *netdev)
962 {
963 struct otx2_nic *pfvf = netdev_priv(netdev);
964
965 /* LBK and SDP links are internal and always UP */
966 if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
967 return 1;
968 return pfvf->linfo.link_up;
969 }
970
/* ethtool -T: advertise hardware timestamping capabilities. Falls back
 * to the generic software-only answer when no PTP device is attached.
 */
static int otx2_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (!pfvf->ptp)
		return ethtool_op_get_ts_info(netdev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = otx2_ptp_clock_index(pfvf);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	/* one-step sync timestamping only on CN10K-class silicon */
	if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
		info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
995
/* Fetch the firmware's auxiliary link info (cgx_fw_data) via mailbox.
 * Returns the response pointer on success or an ERR_PTR on allocation
 * or mailbox failure; never returns NULL.
 */
static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
{
	struct cgx_fw_data *rsp = NULL;
	struct msg_req *req;
	int err = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return ERR_PTR(-ENOMEM);
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!err) {
		rsp = (struct cgx_fw_data *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	} else {
		rsp = ERR_PTR(err);
	}

	mutex_unlock(&pfvf->mbox.lock);
	return rsp;
}
1020
/* ethtool --show-fec: report the active FEC mode from link info and
 * the supported modes from firmware. The fec[] table maps the OTX2
 * firmware FEC encoding to ethtool FEC flags.
 */
static int otx2_get_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp;
	const int fec[] = {
		ETHTOOL_FEC_OFF,
		ETHTOOL_FEC_BASER,
		ETHTOOL_FEC_RS,
		ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
#define FEC_MAX_INDEX 4
	if (pfvf->linfo.fec < FEC_MAX_INDEX)
		fecparam->active_fec = fec[pfvf->linfo.fec];

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
		if (!rsp->fwdata.supported_fec)
			fecparam->fec = ETHTOOL_FEC_NONE;
		else
			fecparam->fec = fec[rsp->fwdata.supported_fec];
	}
	return 0;
}
1047
/* Configure the link FEC mode via an AF mailbox request.
 *
 * Translates the ethtool FEC selection into the firmware encoding,
 * skips the mailbox round-trip when the requested mode is already
 * active, and caches the mode the firmware actually applied in
 * pfvf->linfo.fec. Returns 0 on success or a negative errno.
 */
static int otx2_set_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct mbox *mbox = &pfvf->mbox;
	struct fec_mode *req, *rsp;
	int err = 0, fec = 0;

	switch (fecparam->fec) {
	/* Firmware does not support AUTO mode consider it as FEC_OFF */
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_AUTO:
		fec = OTX2_FEC_OFF;
		break;
	case ETHTOOL_FEC_RS:
		fec = OTX2_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = OTX2_FEC_BASER;
		break;
	default:
		netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
			    fecparam->fec);
		return -EINVAL;
	}

	/* Requested mode already configured: nothing to do */
	if (fec == pfvf->linfo.fec)
		return 0;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}
	req->fec = fec;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto end;

	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						   0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto end;
	}

	/* Firmware echoes the applied mode; a negative value is an error */
	if (rsp->fec >= 0)
		pfvf->linfo.fec = rsp->fec;
	else
		err = rsp->fec;
end:
	mutex_unlock(&mbox->lock);
	return err;
}
1103
/* Translate a firmware FEC encoding into ethtool FEC link-mode bits and
 * OR them into either the advertised or the supported mask of
 * @link_ksettings, depending on @req_mode.
 */
static void otx2_get_fec_info(u64 index, int req_mode,
			      struct ethtool_link_ksettings *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fec_modes) = { 0, };

	if (index == OTX2_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, fec_modes);
	} else if (index == OTX2_FEC_BASER) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, fec_modes);
	} else if (index == OTX2_FEC_RS) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, fec_modes);
	} else if (index == (OTX2_FEC_BASER | OTX2_FEC_RS)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, fec_modes);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, fec_modes);
	}

	/* OR into, rather than overwrite, the already-collected modes */
	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_or(link_ksettings->link_modes.advertising,
			    link_ksettings->link_modes.advertising,
			    fec_modes);
	else
		linkmode_or(link_ksettings->link_modes.supported,
			    link_ksettings->link_modes.supported,
			    fec_modes);
}
1140
/* Convert a CGX firmware link-mode bitmap into ethtool link modes and
 * copy the result into either the advertised or the supported mask of
 * @link_ksettings.
 *
 * NOTE(review): @req_mode is declared bool but compared against the
 * enum link_mode value OTX2_MODE_ADVERTISED (which is 1, so the
 * comparison works); the sibling otx2_get_fec_info() takes int —
 * consider unifying the parameter types.
 */
static void otx2_get_link_mode_info(u64 link_mode_bmap,
				    bool req_mode,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
	/* CGX link modes to Ethtool link mode mapping */
	const int cgx_link_mode[CGX_MODE_MAX] = {
		0, /* SGMII 1000baseT */
		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		0,
		0,
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) {
		/* SGMII sub-GbE modes fan out into half+full duplex pairs
		 * that have no single-entry mapping in the table above.
		 */
		if (bit == CGX_MODE_SGMII_10M_BIT) {
			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes);
			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes);
		} else if (bit == CGX_MODE_SGMII_100M_BIT) {
			linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes);
			linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes);
		} else if (bit == CGX_MODE_SGMII) {
			linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes);
			linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes);
		} else {
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
		}
	}

	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}
1201
otx2_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)1202 static int otx2_get_link_ksettings(struct net_device *netdev,
1203 struct ethtool_link_ksettings *cmd)
1204 {
1205 struct otx2_nic *pfvf = netdev_priv(netdev);
1206 struct cgx_fw_data *rsp = NULL;
1207
1208 cmd->base.duplex = pfvf->linfo.full_duplex;
1209 cmd->base.speed = pfvf->linfo.speed;
1210 cmd->base.autoneg = pfvf->linfo.an;
1211
1212 rsp = otx2_get_fwdata(pfvf);
1213 if (IS_ERR(rsp))
1214 return PTR_ERR(rsp);
1215
1216 if (rsp->fwdata.supported_an)
1217 ethtool_link_ksettings_add_link_mode(cmd,
1218 supported,
1219 Autoneg);
1220
1221 otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
1222 OTX2_MODE_ADVERTISED, cmd);
1223 otx2_get_fec_info(rsp->fwdata.advertised_fec,
1224 OTX2_MODE_ADVERTISED, cmd);
1225 otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
1226 OTX2_MODE_SUPPORTED, cmd);
1227 otx2_get_fec_info(rsp->fwdata.supported_fec,
1228 OTX2_MODE_SUPPORTED, cmd);
1229 return 0;
1230 }
1231
otx2_set_link_ksettings(struct net_device * netdev,const struct ethtool_link_ksettings * cmd)1232 static int otx2_set_link_ksettings(struct net_device *netdev,
1233 const struct ethtool_link_ksettings *cmd)
1234 {
1235 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1236 struct otx2_nic *pf = netdev_priv(netdev);
1237 struct ethtool_link_ksettings cur_ks;
1238 struct cgx_set_link_mode_req *req;
1239 struct mbox *mbox = &pf->mbox;
1240 int err = 0;
1241
1242 memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
1243
1244 if (!ethtool_validate_speed(cmd->base.speed) ||
1245 !ethtool_validate_duplex(cmd->base.duplex))
1246 return -EINVAL;
1247
1248 if (cmd->base.autoneg != AUTONEG_ENABLE &&
1249 cmd->base.autoneg != AUTONEG_DISABLE)
1250 return -EINVAL;
1251
1252 otx2_get_link_ksettings(netdev, &cur_ks);
1253
1254 /* Check requested modes against supported modes by hardware */
1255 if (!linkmode_subset(cmd->link_modes.advertising,
1256 cur_ks.link_modes.supported))
1257 return -EINVAL;
1258
1259 mutex_lock(&mbox->lock);
1260 req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
1261 if (!req) {
1262 err = -ENOMEM;
1263 goto end;
1264 }
1265
1266 req->args.speed = cmd->base.speed;
1267 /* firmware expects 1 for half duplex and 0 for full duplex
1268 * hence inverting
1269 */
1270 req->args.duplex = cmd->base.duplex ^ 0x1;
1271 req->args.an = cmd->base.autoneg;
1272 /* Mask unsupported modes and send message to AF */
1273 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask);
1274 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask);
1275 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask);
1276
1277 linkmode_copy(req->args.advertising,
1278 cmd->link_modes.advertising);
1279 linkmode_andnot(req->args.advertising,
1280 req->args.advertising, mask);
1281
1282 /* inform AF that we need parse this differently */
1283 if (bitmap_weight(req->args.advertising,
1284 __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2)
1285 req->args.multimode = true;
1286
1287 err = otx2_sync_mbox_msg(&pf->mbox);
1288 end:
1289 mutex_unlock(&mbox->lock);
1290 return err;
1291 }
1292
otx2_get_fec_stats(struct net_device * netdev,struct ethtool_fec_stats * fec_stats,struct ethtool_fec_hist * hist)1293 static void otx2_get_fec_stats(struct net_device *netdev,
1294 struct ethtool_fec_stats *fec_stats,
1295 struct ethtool_fec_hist *hist)
1296 {
1297 struct otx2_nic *pfvf = netdev_priv(netdev);
1298 struct cgx_fw_data *rsp;
1299
1300 otx2_update_lmac_fec_stats(pfvf);
1301
1302 /* Report MAC FEC stats */
1303 fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
1304 fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;
1305
1306 rsp = otx2_get_fwdata(pfvf);
1307 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
1308 !otx2_get_phy_fec_stats(pfvf)) {
1309 /* Fetch fwdata again because it's been recently populated with
1310 * latest PHY FEC stats.
1311 */
1312 rsp = otx2_get_fwdata(pfvf);
1313 if (!IS_ERR(rsp)) {
1314 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
1315
1316 if (pfvf->linfo.fec == OTX2_FEC_BASER) {
1317 fec_stats->corrected_blocks.total = p->brfec_corr_blks;
1318 fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
1319 } else {
1320 fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
1321 fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
1322 }
1323 }
1324 }
1325 }
1326
/* ethtool operations for PF netdevs; VFs use otx2vf_ethtool_ops below,
 * sharing most handlers but with VF-specific drvinfo/stats/link ops.
 */
static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.rxfh_max_num_contexts	= MAX_RSS_GROUPS,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc              = otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_fields	= otx2_get_rss_hash_opts,
	.set_rxfh_fields	= otx2_set_rss_hash_opts,
	.create_rxfh_context	= otx2_create_rxfh,
	.modify_rxfh_context	= otx2_modify_rxfh,
	.remove_rxfh_context	= otx2_remove_rxfh,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fec_stats		= otx2_get_fec_stats,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings     = otx2_get_link_ksettings,
	.set_link_ksettings     = otx2_set_link_ksettings,
};
1367
/* Attach the PF ethtool operations to @netdev. */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}
1372
1373 /* VF's ethtool APIs */
otx2vf_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)1374 static void otx2vf_get_drvinfo(struct net_device *netdev,
1375 struct ethtool_drvinfo *info)
1376 {
1377 struct otx2_nic *vf = netdev_priv(netdev);
1378
1379 strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
1380 strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
1381 }
1382
/* Emit VF stat names for ETH_SS_STATS in the same order that
 * otx2vf_get_ethtool_stats() emits values.
 */
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < otx2_n_dev_stats; i++)
		ethtool_puts(&data, otx2_dev_stats[i].name);
	for (i = 0; i < otx2_n_drv_stats; i++)
		ethtool_puts(&data, otx2_drv_stats[i].name);

	otx2_get_qset_strings(vf, &data, 0);
	ethtool_puts(&data, "reset_count");
}
1401
/* Collect VF stat values in the order declared by otx2vf_get_strings():
 * device stats, driver stats, per-queue stats, reset counter.
 */
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	u64 *dev_vals = (u64 *)&vf->hw.dev_stats;
	atomic_t *drv_vals = (atomic_t *)&vf->hw.drv_stats;
	int i;

	/* Refresh hardware counters before reading them out */
	otx2_get_dev_stats(vf);

	for (i = 0; i < otx2_n_dev_stats; i++)
		*data++ = dev_vals[otx2_dev_stats[i].index];

	for (i = 0; i < otx2_n_drv_stats; i++)
		*data++ = atomic_read(&drv_vals[otx2_drv_stats[i].index]);

	otx2_get_qset_stats(vf, stats, &data);
	*data++ = vf->reset_count;
}
1420
otx2vf_get_sset_count(struct net_device * netdev,int sset)1421 static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
1422 {
1423 struct otx2_nic *vf = netdev_priv(netdev);
1424 int qstats_count;
1425
1426 if (sset != ETH_SS_STATS)
1427 return -EINVAL;
1428
1429 qstats_count = otx2_n_queue_stats *
1430 (vf->hw.rx_queues + otx2_get_total_tx_queues(vf));
1431
1432 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
1433 }
1434
otx2vf_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)1435 static int otx2vf_get_link_ksettings(struct net_device *netdev,
1436 struct ethtool_link_ksettings *cmd)
1437 {
1438 struct otx2_nic *pfvf = netdev_priv(netdev);
1439
1440 if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
1441 cmd->base.duplex = DUPLEX_FULL;
1442 cmd->base.speed = SPEED_100000;
1443 } else {
1444 return otx2_get_link_ksettings(netdev, cmd);
1445 }
1446 return 0;
1447 }
1448
/* ethtool operations for VF netdevs: shares most handlers with the PF
 * table but uses VF-specific drvinfo/strings/stats/link callbacks and
 * omits FEC and set_link_ksettings support.
 */
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.rxfh_max_num_contexts	= MAX_RSS_GROUPS,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc              = otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_fields	= otx2_get_rss_hash_opts,
	.set_rxfh_fields	= otx2_set_rss_hash_opts,
	.create_rxfh_context	= otx2_create_rxfh,
	.modify_rxfh_context	= otx2_modify_rxfh,
	.remove_rxfh_context	= otx2_remove_rxfh,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings     = otx2vf_get_link_ksettings,
	.get_ts_info		= otx2_get_ts_info,
};
1485
/* Attach the VF ethtool operations to @netdev; exported for the VF
 * driver module.
 */
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
1491