// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"

/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
 */
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 __always_unused *rule_locs)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vport->num_rxq;
		idpf_vport_ctrl_unlock(netdev);

		return 0;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);

	return -EOPNOTSUPP;
}

/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size on success, error value on failure.
 */
static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_key_size;
}

/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size on success, error value on failure.
 */
static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_lut_size;
}

/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table and hash key from the driver's cached RSS
 * configuration. Returns 0 on success, -EOPNOTSUPP if RSS is not supported.
 */
static int idpf_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	int err = 0;
	u16 i;

	idpf_vport_ctrl_lock(netdev);

	adapter = np->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rxfh->indir[i] = rss_data->rss_lut[i];
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

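/* Editorial note (not part of the original source): userspace reaches the
 * get_rxfh/get_rxfh_key_size/get_rxfh_indir_size callbacks above through the
 * standard ethtool RSS query, e.g.:
 *
 *	ethtool -x eth0
 *
 * The interface name is illustrative.
 */
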
/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Returns 0 after programming the table, -EOPNOTSUPP if RSS is not supported
 * or the requested hash function is not Toeplitz, or a negative error code
 * if programming the table fails.
 */
static int idpf_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;
	u16 lut;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (rxfh->key)
		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
			rss_data->rss_lut[lut] = rxfh->indir[lut];
	}

	err = idpf_config_rss(vport);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

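/* Editorial note (not part of the original source): a typical way to exercise
 * idpf_set_rxfh() from userspace is the ethtool RSS configuration command,
 * e.g. spreading traffic evenly over the first four queues:
 *
 *	ethtool -X eth0 equal 4
 *
 * The interface name and queue count are illustrative.
 */
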
/**
 * idpf_get_channels: get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report the maximum TX and RX channel counts. Report one extra "other"
 * channel to match our mailbox queue.
 */
static void idpf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 num_txq, num_rxq;
	u16 combined;

	vport_config = np->adapter->vport_config[np->vport_idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* Report maximum channels */
	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
				 vport_config->max_q.max_rxq);
	ch->max_rx = vport_config->max_q.max_rxq;
	ch->max_tx = vport_config->max_q.max_txq;

	ch->max_other = IDPF_MAX_MBXQ;
	ch->other_count = IDPF_MAX_MBXQ;

	ch->combined_count = combined;
	ch->rx_count = num_rxq - combined;
	ch->tx_count = num_txq - combined;
}

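/* Editorial note (not part of the original source): with hypothetical
 * requested counts of num_txq = 8 and num_rxq = 4, the reporting above
 * yields combined = min(8, 4) = 4, tx_count = 8 - 4 = 4 dedicated TX
 * channels and rx_count = 4 - 4 = 0 dedicated RX channels.
 */
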
/**
 * idpf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 * on failure.
 */
static int idpf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct idpf_vport_config *vport_config;
	unsigned int num_req_tx_q;
	unsigned int num_req_rx_q;
	struct idpf_vport *vport;
	u16 num_txq, num_rxq;
	struct device *dev;
	int err = 0;
	u16 idx;

	if (ch->rx_count && ch->tx_count) {
		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
		return -EINVAL;
	}

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify number of queues that exceeds max.
	 * Stack checks max combined_count and max [tx|rx]_count but not the
	 * max combined_count + [tx|rx]_count. These checks should catch that.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
		goto unlock_mutex;

	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (err) {
		/* roll back queue change */
		vport_config->user_config.num_req_tx_qs = num_txq;
		vport_config->user_config.num_req_rx_qs = num_rxq;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

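/* Editorial note (not part of the original source): channel counts are
 * changed from userspace with, e.g.:
 *
 *	ethtool -L eth0 combined 8
 *
 * The interface name and count are illustrative; a change of counts
 * triggers a soft reset (IDPF_SR_Q_CHANGE) as implemented above.
 */
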
/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ethtool ringparam structure, carries the TCP data split state
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kring,
			       struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	kring->tcp_data_split = idpf_vport_get_hsplit(vport);

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ethtool ringparam structure, carries the TCP data split
 *	   request
 * @ext_ack: extended ACK used to report TCP data split failures
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
static int idpf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport_user_config_data *config_data;
	u32 new_rx_count, new_tx_count;
	struct idpf_vport *vport;
	int i, err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
			   ring->tx_pending,
			   IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
			   ring->rx_pending,
			   IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count &&
	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "setting TCP data split is not supported");
		err = -EOPNOTSUPP;

		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

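/* Editorial note (not part of the original source): descriptor counts and
 * header split are changed from userspace with, e.g.:
 *
 *	ethtool -G eth0 rx 1024 tx 512
 *	ethtool -G eth0 tcp-data-split on
 *
 * Values are illustrative; requests are rounded up to the required
 * multiples as shown above, and tcp-data-split requires a recent ethtool.
 */
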
/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};

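/* Editorial note (not part of the original source): for illustration, the
 * first entry above expands (via IDPF_TX_QUEUE_STAT and IDPF_STAT) to
 * roughly:
 *
 *	{ .stat_string = "pkts",
 *	  .sizeof_stat = sizeof_field(struct idpf_tx_queue, q_stats.packets),
 *	  .stat_offset = offsetof(struct idpf_tx_queue, q_stats.packets) }
 */
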
/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport, _name, _stat)

static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}

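/* Editorial note (not part of the original source): given the "%s_q-%u_%s"
 * format above, TX queue 0's packet counter is reported to ethtool -S as
 * "tx_q-0_pkts", RX queue 2's byte counter as "rx_q-2_bytes", and so on.
 */
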
/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
				  const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_puts(p, stats[i].stat_string);
}

/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	unsigned int i;

	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
			      IDPF_PORT_STATS_LEN);

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* It's critical that we always report a constant number of strings and
	 * that the strings are reported in the same order regardless of how
	 * many queues are actually in use.
	 */
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);
}

/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		idpf_get_stat_strings(netdev, data);
		break;
	default:
		break;
	}
}

/**
 * idpf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports size of various string tables.
 */
static int idpf_get_sset_count(struct net_device *netdev, int sset)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 max_txq, max_rxq;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* This size reported back here *must* be constant throughout the
	 * lifecycle of the netdevice, i.e. we must report the maximum length
	 * even for queues that don't technically exist. This is due to the
	 * fact that this userspace API uses three separate ioctl calls to get
	 * stats data but has no way to communicate back to userspace when that
	 * size has changed, which can typically happen as a result of changing
	 * number of queues. If the number/order of stats change in the middle
	 * of this call chain it will lead to userspace crashing/accessing bad
	 * data through buffer under/overflow.
	 */
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
}

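/* Editorial note (not part of the original source): as a worked example with
 * hypothetical maximums of max_txq = 16 and max_rxq = 16, the count returned
 * above is 15 port stats + (3 * 16) TX queue stats + (3 * 16) RX queue
 * stats = 111 u64 values, constant for the lifetime of the netdev.
 */
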
/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 * @type: type of the queue
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 * zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, const void *q,
				 enum virtchnl2_queue_type type)
{
	const struct u64_stats_sync *stats_sync;
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		size = IDPF_RX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_rx_queue_stats;
		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
	} else {
		size = IDPF_TX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_tx_queue_stats;
		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds empty stats to the data buffer
 * since we don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = IDPF_RX_QUEUE_STATS_LEN;
	else
		stats_len = IDPF_TX_QUEUE_STATS_LEN;

	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;
	*data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}

/**
 * idpf_collect_queue_stats - accumulate various per queue stats
 * into port level stats
 * @vport: pointer to vport struct
 **/
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
	struct idpf_port_stats *pstats = &vport->port_stats;
	int i, j;

	/* zero out port stats since they're actually tracked in per
	 * queue stats; this is only for reporting
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
			struct idpf_rx_queue_stats *stats;
			struct idpf_rx_queue *rxq;
			unsigned int start;

			if (idpf_is_queue_model_split(vport->rxq_model))
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];

			if (!rxq)
				continue;

			do {
				start = u64_stats_fetch_begin(&rxq->stats_sync);

				stats = &rxq->q_stats;
				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
				hsplit = u64_stats_read(&stats->hsplit_pkts);
				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
				bad_descs = u64_stats_read(&stats->bad_descs);
			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
			u64_stats_add(&pstats->rx_hsplit, hsplit);
			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++) {
			u64 linearize, qbusy, skb_drops, dma_map_errs;
			struct idpf_tx_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			unsigned int start;

			if (!txq)
				continue;

			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				stats = &txq->q_stats;
				linearize = u64_stats_read(&stats->linearize);
				qbusy = u64_stats_read(&stats->q_busy);
				skb_drops = u64_stats_read(&stats->skb_drops);
				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
			} while (u64_stats_fetch_retry(&txq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->tx_linearize, linearize);
			u64_stats_add(&pstats->tx_busy, qbusy);
			u64_stats_add(&pstats->tx_drops, skb_drops);
			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}
}

/**
 * idpf_get_ethtool_stats - report device statistics
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64.
 */
static void idpf_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats __always_unused *stats,
				   u64 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct idpf_vport *vport;
	unsigned int total = 0;
	unsigned int i, j;
	bool is_splitq;
	u16 qtype;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP) {
		idpf_vport_ctrl_unlock(netdev);

		return;
	}

	rcu_read_lock();

	idpf_collect_queue_stats(vport);
	idpf_add_port_stats(vport, &data);

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		qtype = VIRTCHNL2_QUEUE_TYPE_TX;

		for (j = 0; j < txq_grp->num_txq; j++, total++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];

			if (!txq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, txq, qtype);
		}
	}

	vport_config = vport->adapter->vport_config[vport->idx];
	/* It is critical we provide a constant number of stats back to
	 * userspace regardless of how many queues are actually in use because
	 * there is no way to inform userspace the size has changed between
	 * ioctl calls. This will fill in any missing stats with zero.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
	total = 0;

	is_splitq = idpf_is_queue_model_split(vport->rxq_model);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		qtype = VIRTCHNL2_QUEUE_TYPE_RX;

		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++, total++) {
			struct idpf_rx_queue *rxq;

			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, rxq, qtype);
		}
	}

	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

	rcu_read_unlock();

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_find_rxq_vec - find rxq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to rx vector
 */
static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
					       int q_num)
{
	int q_grp, q_idx;

	if (!idpf_is_queue_model_split(vport->rxq_model))
		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}

/**
 * idpf_find_txq_vec - find txq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to tx vector
 */
static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
					       int q_num)
{
	int q_grp;

	if (!idpf_is_queue_model_split(vport->txq_model))
		return vport->txqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

	return vport->txq_grps[q_grp].complq->q_vector;
}

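/* Editorial note (not part of the original source): in splitq mode the queue
 * index decomposes into a group and an offset within the group. With a
 * hypothetical IDPF_DFLT_SPLITQ_RXQ_PER_GROUP of 4, q_num = 5 maps to
 * q_grp = 1, q_idx = 1. For TX, all queues in a group share the completion
 * queue's vector, so only the group index is needed.
 */
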
/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_vector: queue vector corresponding to this queue
 * @type: queue type
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
				  const struct idpf_q_vector *q_vector,
				  enum virtchnl2_queue_type type)
{
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
	}
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	const struct idpf_netdev_priv *np = netdev_priv(netdev);
	const struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_RX);

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_TX);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @qv: queue vector for which the ITR values have to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
				 struct idpf_q_vector *qv, bool is_rxq)
{
	u32 use_adaptive_coalesce, coalesce_usecs;
	bool is_dim_ena = false;
	u16 itr_val;

	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}
	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");

		return -EINVAL;
	}

	if (is_dim_ena && use_adaptive_coalesce)
		return 0;

	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(qv->vport->netdev,
			   "Invalid value, %d-usecs range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);

		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(qv->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
						  false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
		}
	}

	/* Update of static/dynamic ITR will be taken care of when the
	 * interrupt fires
	 */
	return 0;
}

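/* Editorial note (not part of the original source): the constraints above can
 * be exercised from userspace with, e.g.:
 *
 *	ethtool -C eth0 adaptive-rx off rx-usecs 50
 *
 * A request of 51 usecs would be rounded down to 50 since the hardware only
 * supports even ITR values; requesting a new rx-usecs value together with
 * adaptive-rx on is rejected. Names and values are illustrative.
 */
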
/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that needs updating
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
			       const struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_q_vector *qv;

	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
		      idpf_find_txq_vec(vport, q_num);

	if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
		return -EINVAL;

	return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int i, err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	for (i = 0; i < vport->num_txq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	err = idpf_set_q_coalesce(vport, ec, q_num, false);
	if (err) {
		idpf_vport_ctrl_unlock(netdev);

		return err;
	}

	err = idpf_set_q_coalesce(vport, ec, q_num, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

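/* Editorial note (not part of the original source): per-queue coalescing is
 * driven from userspace with the sub-command form of ethtool, e.g. for
 * queue 0 only:
 *
 *	ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 64
 *
 * Names and values are illustrative.
 */
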
/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->msg_enable = data;
}

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 **/
static int idpf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (netif_carrier_ok(netdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = np->link_speed_mbps;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	return 0;
}

static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &idpf_ethtool_ops;
}
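
/* Editorial note (not part of the original source): a minimal usage sketch,
 * assuming a driver-owned netdev that has not yet been registered; ethtool
 * ops are typically assigned before register_netdev():
 *
 *	struct net_device *netdev = ...;
 *
 *	idpf_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 */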