xref: /linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
2 /*
3  * Copyright 2008 - 2016 Freescale Semiconductor Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/string.h>
9 #include <linux/of.h>
10 #include <linux/of_platform.h>
11 #include <linux/platform_device.h>
12 #include <linux/net_tstamp.h>
13 #include <linux/fsl/ptp_qoriq.h>
14 
15 #include "dpaa_eth.h"
16 #include "mac.h"
17 
/* Names of the statistics maintained per CPU; each is reported once per
 * online CPU plus a TOTAL column.  Order must match the value order
 * written by copy_stats().
 */
static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};
29 
/* Names of the global (not per-CPU) statistics.  The table is read-only,
 * so declare it const; order must match the data layout produced by
 * dpaa_get_ethtool_stats() (rx_errors struct, ern_cnt struct, then the
 * three congestion values).
 */
static const char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};
52 
/* Entry counts of the two statistics string tables above. */
#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
55 
56 static int dpaa_get_link_ksettings(struct net_device *net_dev,
57 				   struct ethtool_link_ksettings *cmd)
58 {
59 	struct dpaa_priv *priv = netdev_priv(net_dev);
60 	struct mac_device *mac_dev = priv->mac_dev;
61 
62 	return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
63 }
64 
65 static int dpaa_set_link_ksettings(struct net_device *net_dev,
66 				   const struct ethtool_link_ksettings *cmd)
67 {
68 	struct dpaa_priv *priv = netdev_priv(net_dev);
69 	struct mac_device *mac_dev = priv->mac_dev;
70 
71 	return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
72 }
73 
74 static void dpaa_get_drvinfo(struct net_device *net_dev,
75 			     struct ethtool_drvinfo *drvinfo)
76 {
77 	strscpy(drvinfo->driver, KBUILD_MODNAME,
78 		sizeof(drvinfo->driver));
79 	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
80 		sizeof(drvinfo->bus_info));
81 }
82 
83 static u32 dpaa_get_msglevel(struct net_device *net_dev)
84 {
85 	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
86 }
87 
88 static void dpaa_set_msglevel(struct net_device *net_dev,
89 			      u32 msg_enable)
90 {
91 	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
92 }
93 
94 static int dpaa_nway_reset(struct net_device *net_dev)
95 {
96 	struct dpaa_priv *priv = netdev_priv(net_dev);
97 	struct mac_device *mac_dev = priv->mac_dev;
98 
99 	return phylink_ethtool_nway_reset(mac_dev->phylink);
100 }
101 
102 static void dpaa_get_pauseparam(struct net_device *net_dev,
103 				struct ethtool_pauseparam *epause)
104 {
105 	struct dpaa_priv *priv = netdev_priv(net_dev);
106 	struct mac_device *mac_dev = priv->mac_dev;
107 
108 	phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
109 }
110 
111 static int dpaa_set_pauseparam(struct net_device *net_dev,
112 			       struct ethtool_pauseparam *epause)
113 {
114 	struct dpaa_priv *priv = netdev_priv(net_dev);
115 	struct mac_device *mac_dev = priv->mac_dev;
116 
117 	return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
118 }
119 
120 static int dpaa_get_sset_count(struct net_device *net_dev, int type)
121 {
122 	unsigned int total_stats, num_stats;
123 
124 	num_stats   = num_online_cpus() + 1;
125 	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
126 			DPAA_STATS_GLOBAL_LEN;
127 
128 	switch (type) {
129 	case ETH_SS_STATS:
130 		return total_stats;
131 	default:
132 		return -EOPNOTSUPP;
133 	}
134 }
135 
136 static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
137 		       int crr_cpu, u64 bp_count, u64 *data)
138 {
139 	int num_values = num_cpus + 1;
140 	int crr = 0;
141 
142 	/* update current CPU's stats and also add them to the total values */
143 	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
144 	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
145 
146 	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
147 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
148 
149 	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
150 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
151 
152 	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
153 	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
154 
155 	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
156 	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
157 
158 	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
159 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
160 
161 	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
162 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
163 
164 	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
165 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
166 
167 	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
168 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
169 
170 	data[crr * num_values + crr_cpu] = bp_count;
171 	data[crr++ * num_values + num_cpus] += bp_count;
172 }
173 
/* Gather all ethtool statistics into @data.
 *
 * Layout (must match dpaa_get_strings()): per-CPU statistics rows of
 * (num_cpus + 1) values each (per-CPU columns plus a TOTAL), then the
 * bpool row, then the global rx error counters, the ERN counters and
 * finally the three congestion values.
 */
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv     = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	/* zero the accumulators and the entire output area; per-CPU rows
	 * are then filled (and totals accumulated) by copy_stats()
	 */
	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		dpaa_bp = priv->dpaa_bp;
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		/* accumulate per-CPU rx error counters into the globals */
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		/* accumulate per-CPU enqueue rejection counters */
		ern_cnt.cg_tdrop     += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred         += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond     += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window  += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop     += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired   += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero     += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	/* global statistics start after all per-CPU rows (incl. bpool).
	 * NOTE(review): the struct memcpys assume dpaa_rx_errors and
	 * dpaa_ern_cnt consist solely of u64 counters laid out in the
	 * dpaa_stats_global[] string order — confirm against dpaa_eth.h.
	 */
	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num    = 0;
	cg_status = false;
	cg_time   = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num    = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies   = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
242 
243 static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
244 			     u8 *data)
245 {
246 	unsigned int i, j, num_cpus;
247 
248 	num_cpus = num_online_cpus();
249 
250 	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
251 		for (j = 0; j < num_cpus; j++)
252 			ethtool_sprintf(&data, "%s [CPU %d]",
253 					dpaa_stats_percpu[i], j);
254 
255 		ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]);
256 	}
257 	for (i = 0; i < num_cpus; i++)
258 		ethtool_sprintf(&data, "bpool [CPU %d]", i);
259 
260 	ethtool_puts(&data, "bpool [TOTAL]");
261 
262 	for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++)
263 		ethtool_puts(&data, dpaa_stats_global[i]);
264 }
265 
266 static int dpaa_get_hash_opts(struct net_device *dev,
267 			      struct ethtool_rxnfc *cmd)
268 {
269 	struct dpaa_priv *priv = netdev_priv(dev);
270 
271 	cmd->data = 0;
272 
273 	switch (cmd->flow_type) {
274 	case TCP_V4_FLOW:
275 	case TCP_V6_FLOW:
276 	case UDP_V4_FLOW:
277 	case UDP_V6_FLOW:
278 		if (priv->keygen_in_use)
279 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
280 		fallthrough;
281 	case IPV4_FLOW:
282 	case IPV6_FLOW:
283 	case SCTP_V4_FLOW:
284 	case SCTP_V6_FLOW:
285 	case AH_ESP_V4_FLOW:
286 	case AH_ESP_V6_FLOW:
287 	case AH_V4_FLOW:
288 	case AH_V6_FLOW:
289 	case ESP_V4_FLOW:
290 	case ESP_V6_FLOW:
291 		if (priv->keygen_in_use)
292 			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
293 		break;
294 	default:
295 		cmd->data = 0;
296 		break;
297 	}
298 
299 	return 0;
300 }
301 
302 static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
303 			  u32 *unused)
304 {
305 	int ret = -EOPNOTSUPP;
306 
307 	switch (cmd->cmd) {
308 	case ETHTOOL_GRXFH:
309 		ret = dpaa_get_hash_opts(dev, cmd);
310 		break;
311 	default:
312 		break;
313 	}
314 
315 	return ret;
316 }
317 
318 static void dpaa_set_hash(struct net_device *net_dev, bool enable)
319 {
320 	struct mac_device *mac_dev;
321 	struct fman_port *rxport;
322 	struct dpaa_priv *priv;
323 
324 	priv = netdev_priv(net_dev);
325 	mac_dev = priv->mac_dev;
326 	rxport = mac_dev->port[0];
327 
328 	fman_port_use_kg_hash(rxport, enable);
329 	priv->keygen_in_use = enable;
330 }
331 
332 static int dpaa_set_hash_opts(struct net_device *dev,
333 			      struct ethtool_rxnfc *nfc)
334 {
335 	int ret = -EINVAL;
336 
337 	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
338 	if (nfc->data &
339 	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
340 		return -EINVAL;
341 
342 	switch (nfc->flow_type) {
343 	case TCP_V4_FLOW:
344 	case TCP_V6_FLOW:
345 	case UDP_V4_FLOW:
346 	case UDP_V6_FLOW:
347 	case IPV4_FLOW:
348 	case IPV6_FLOW:
349 	case SCTP_V4_FLOW:
350 	case SCTP_V6_FLOW:
351 	case AH_ESP_V4_FLOW:
352 	case AH_ESP_V6_FLOW:
353 	case AH_V4_FLOW:
354 	case AH_V6_FLOW:
355 	case ESP_V4_FLOW:
356 	case ESP_V6_FLOW:
357 		dpaa_set_hash(dev, !!nfc->data);
358 		ret = 0;
359 		break;
360 	default:
361 		break;
362 	}
363 
364 	return ret;
365 }
366 
367 static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
368 {
369 	int ret = -EOPNOTSUPP;
370 
371 	switch (cmd->cmd) {
372 	case ETHTOOL_SRXFH:
373 		ret = dpaa_set_hash_opts(dev, cmd);
374 		break;
375 	default:
376 		break;
377 	}
378 
379 	return ret;
380 }
381 
382 static int dpaa_get_ts_info(struct net_device *net_dev,
383 			    struct kernel_ethtool_ts_info *info)
384 {
385 	struct device *dev = net_dev->dev.parent;
386 	struct device_node *mac_node = dev->of_node;
387 	struct device_node *fman_node = NULL, *ptp_node = NULL;
388 	struct platform_device *ptp_dev = NULL;
389 	struct ptp_qoriq *ptp = NULL;
390 
391 	info->phc_index = -1;
392 
393 	fman_node = of_get_parent(mac_node);
394 	if (fman_node) {
395 		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
396 		of_node_put(fman_node);
397 	}
398 
399 	if (ptp_node) {
400 		ptp_dev = of_find_device_by_node(ptp_node);
401 		of_node_put(ptp_node);
402 	}
403 
404 	if (ptp_dev)
405 		ptp = platform_get_drvdata(ptp_dev);
406 
407 	if (ptp)
408 		info->phc_index = ptp->phc_index;
409 
410 	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
411 				SOF_TIMESTAMPING_RX_HARDWARE |
412 				SOF_TIMESTAMPING_RAW_HARDWARE;
413 	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
414 			 (1 << HWTSTAMP_TX_ON);
415 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
416 			   (1 << HWTSTAMP_FILTER_ALL);
417 
418 	return 0;
419 }
420 
421 static int dpaa_get_coalesce(struct net_device *dev,
422 			     struct ethtool_coalesce *c,
423 			     struct kernel_ethtool_coalesce *kernel_coal,
424 			     struct netlink_ext_ack *extack)
425 {
426 	struct qman_portal *portal;
427 	u32 period;
428 	u8 thresh;
429 
430 	portal = qman_get_affine_portal(smp_processor_id());
431 	qman_portal_get_iperiod(portal, &period);
432 	qman_dqrr_get_ithresh(portal, &thresh);
433 
434 	c->rx_coalesce_usecs = period;
435 	c->rx_max_coalesced_frames = thresh;
436 
437 	return 0;
438 }
439 
/* Apply new Rx interrupt-coalescing settings to every online affine
 * QMan portal: rx_coalesce_usecs becomes the portal interrupt period,
 * rx_max_coalesced_frames the DQRR interrupt threshold.  If any portal
 * rejects the new values, all portals already updated are reverted to
 * the previous settings and the error is returned.
 */
static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	const cpumask_t *cpus = qman_affine_cpus();
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	bool *needs_revert;
	int cpu, res;

	/* track which portals were updated so only those are reverted.
	 * NOTE(review): indexing by CPU id assumes possible CPU ids are
	 * dense (< num_possible_cpus()) — typical, but worth confirming.
	 */
	needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
	if (!needs_revert)
		return -ENOMEM;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save previous values */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			/* undo the period just set on this portal before
			 * reverting the fully-updated ones
			 */
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	kfree(needs_revert);

	return 0;

revert_values:
	/* restore previous values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
		/* previous values will not fail, ignore return value */
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	kfree(needs_revert);

	return res;
}
497 
/* Ethtool operations for DPAA Ethernet interfaces.  Link settings,
 * pause parameters and autoneg restart are delegated to phylink; only
 * rx-usecs and rx-frames coalescing parameters are supported.
 */
const struct ethtool_ops dpaa_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};
519