// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"

/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
 */
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 __always_unused *rule_locs)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vport->num_rxq;
		idpf_vport_ctrl_unlock(netdev);

		return 0;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);

	return -EOPNOTSUPP;
}

/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size on success, error value on failure.
 */
static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_key_size;
}

/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size on success, error value on failure.
 */
static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_lut_size;
}

/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table and RSS key from the driver's cached
 * configuration. Returns 0 on success, -EOPNOTSUPP if RSS is not supported.
 */
static int idpf_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	int err = 0;
	u16 i;

	idpf_vport_ctrl_lock(netdev);

	adapter = np->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rxfh->indir[i] = rss_data->rss_lut[i];
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
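
/* Usage sketch (illustrative, not driver code): `ethtool -x <dev>` reads
 * back the key and indirection table filled in above. Note that if the
 * vport is not up, the callback returns 0 without copying anything.
 */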

/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Returns 0 after programming the table, -EOPNOTSUPP if RSS is not supported
 * or an unsupported hash function is requested, or a negative error code if
 * programming the table fails.
 */
static int idpf_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;
	u16 lut;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (rxfh->key)
		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
			rss_data->rss_lut[lut] = rxfh->indir[lut];
	}

	err = idpf_config_rss(vport);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
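
/* Usage sketch (illustrative, not driver code): only the Toeplitz hash
 * (ETH_RSS_HASH_TOP) is accepted, so e.g.
 *
 *	ethtool -X <dev> hfunc toeplitz equal 16
 *
 * spreads the hash across the first 16 queues, while any other hfunc value
 * fails with -EOPNOTSUPP per the check above.
 */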

/**
 * idpf_get_channels - get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report the maximum number of TX and RX channels. Report one extra channel
 * to match our mailbox queue.
 */
static void idpf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 num_txq, num_rxq;
	u16 combined;

	vport_config = np->adapter->vport_config[np->vport_idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* Report maximum channels */
	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
				 vport_config->max_q.max_rxq);
	ch->max_rx = vport_config->max_q.max_rxq;
	ch->max_tx = vport_config->max_q.max_txq;

	ch->max_other = IDPF_MAX_MBXQ;
	ch->other_count = IDPF_MAX_MBXQ;

	ch->combined_count = combined;
	ch->rx_count = num_rxq - combined;
	ch->tx_count = num_txq - combined;
}
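
/* Example (illustrative): with num_req_tx_qs = 8 and num_req_rx_qs = 4,
 * combined = min(8, 4) = 4, so `ethtool -l` reports the current settings as
 * Combined: 4, TX: 4, RX: 0, plus IDPF_MAX_MBXQ "other" channels for the
 * mailbox queue.
 */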

/**
 * idpf_set_channels - set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 * on failure.
 */
static int idpf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct idpf_vport_config *vport_config;
	u16 combined, num_txq, num_rxq;
	unsigned int num_req_tx_q;
	unsigned int num_req_rx_q;
	struct idpf_vport *vport;
	struct device *dev;
	int err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* These checks are for cases where the user didn't specify a
	 * particular value on the command line but we get a non-zero value
	 * anyway via get_channels(); see do_schannels() in the user-space
	 * part of the ethtool repository.
	 */
	if (ch->combined_count == combined)
		ch->combined_count = 0;
	if (ch->combined_count && ch->rx_count == num_rxq - combined)
		ch->rx_count = 0;
	if (ch->combined_count && ch->tx_count == num_txq - combined)
		ch->tx_count = 0;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify a number of queues that exceeds the max.
	 * The stack checks the max combined_count and the max [tx|rx]_count,
	 * but not max combined_count + [tx|rx]_count. These checks should
	 * catch that.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum number of TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum number of RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
		goto unlock_mutex;

	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (err) {
		/* roll back queue change */
		vport_config->user_config.num_req_tx_qs = num_txq;
		vport_config->user_config.num_req_rx_qs = num_rxq;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
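
/* Usage sketch (illustrative, not driver code): `ethtool -L <dev> combined 8`
 * requests 8 TX and 8 RX queues; when that differs from the current counts,
 * the vport is soft-reset via IDPF_SR_Q_CHANGE to renegotiate queues with
 * the CP, and the request is rolled back if the reset fails.
 */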

/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ringparam structure, used to report the TCP data split state
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kring,
			       struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	kring->tcp_data_split = idpf_vport_get_hsplit(vport);

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ringparam structure, carries the requested TCP data split
 *	   setting
 * @ext_ack: extended ACK used to report errors back to userspace
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
static int idpf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport_user_config_data *config_data;
	u32 new_rx_count, new_tx_count;
	struct idpf_vport *vport;
	int i, err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) are less than the minimum supported (%u)\n",
			   ring->tx_pending,
			   IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) are less than the minimum supported (%u)\n",
			   ring->rx_pending,
			   IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count &&
	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "setting TCP data split is not supported");
		err = -EOPNOTSUPP;

		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well.
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
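
/* Usage sketch (illustrative, not driver code): `ethtool -G <dev> rx 1000`
 * is aligned up to the next IDPF_REQ_RXQ_DESC_MULTIPLE boundary (with an
 * informational log if rounding occurred) and then applied through an
 * IDPF_SR_Q_DESC_CHANGE soft reset.
 */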

/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as an offset from a common base pointer. Stats
 * should be defined in constant arrays using the IDPF_STAT macro, with every
 * element of the array using the same _type for calculating the sizeof_stat
 * and stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_one_ethtool_stat() helper function.
 *
 * The @stat_string is copied verbatim for port stats by
 * idpf_add_stat_strings(), and inserted into the "%s_q-%u_%s" per-queue
 * format by __idpf_add_qstat_strings(), so it should be a plain name with
 * no format specifiers of its own.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macro for defining some statistics related to queues */
#define IDPF_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
	IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
	IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
	IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
	IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport, _name, _stat)

static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the static const stats array into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, @stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
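
/* For example (illustrative), idpf_add_qstat_strings(&data,
 * idpf_gstrings_tx_queue_stats, "tx", 3) emits the strings "tx_q-3_pkts",
 * "tx_q-3_bytes" and "tx_q-3_lso_pkts", per the "%s_q-%u_%s" format above.
 */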

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
				  const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_puts(p, stats[i].stat_string);
}

/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	unsigned int i;

	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
			      IDPF_PORT_STATS_LEN);

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* It's critical that we always report a constant number of strings and
	 * that the strings are reported in the same order regardless of how
	 * many queues are actually in use.
	 */
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);

	page_pool_ethtool_stats_get_strings(data);
}
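
/* The resulting `ethtool -S` string layout is therefore fixed: port stats
 * first, then max_txq blocks of TX queue strings, then max_rxq blocks of RX
 * queue strings, then the page pool strings. This must stay in lockstep with
 * idpf_get_sset_count() and idpf_get_ethtool_stats() below.
 */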

/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		idpf_get_stat_strings(netdev, data);
		break;
	default:
		break;
	}
}

/**
 * idpf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports size of various string tables.
 */
static int idpf_get_sset_count(struct net_device *netdev, int sset)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 max_txq, max_rxq;
	unsigned int size;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* The size reported back here *must* be constant throughout the
	 * lifecycle of the netdevice, i.e. we must report the maximum length
	 * even for queues that don't technically exist. This is due to the
	 * fact that this userspace API uses three separate ioctl calls to get
	 * stats data but has no way to communicate back to userspace when that
	 * size has changed, which can typically happen as a result of changing
	 * the number of queues. If the number/order of stats change in the
	 * middle of this call chain it will lead to userspace crashing or
	 * accessing bad data through buffer under/overflow.
	 */
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
	size += page_pool_ethtool_stats_get_count();

	return size;
}
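
/* For reference: a single `ethtool -S` invocation typically issues
 * ETHTOOL_GSSET_INFO, ETHTOOL_GSTRINGS and ETHTOOL_GSTATS back to back; the
 * constant count returned here is what keeps those three views consistent.
 */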

/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: pointer to the structure holding the stat to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't copy them in a single pass with idpf_add_one_ethtool_stat
 * alone. Assumes that queue stats are defined in
 * idpf_gstrings_[rx|tx]_queue_stats. If the queue pointer is null, zero out
 * the queue stat values and update the data pointer. Otherwise safely copy
 * the stats from the queue into the supplied buffer and update the data
 * pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
{
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
		size = IDPF_RX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_rx_queue_stats;
	} else {
		size = IDPF_TX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_tx_queue_stats;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(&q->stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(&q->stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace that the size
 * changed between those calls. This writes zeroed stats into the data buffer
 * since we don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = IDPF_RX_QUEUE_STATS_LEN;
	else
		stats_len = IDPF_TX_QUEUE_STATS_LEN;

	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;
	*data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}

/**
 * idpf_collect_queue_stats - accumulate various per queue stats into port
 *			      level stats
 * @vport: pointer to vport struct
 */
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
	struct idpf_port_stats *pstats = &vport->port_stats;
	int i, j;

	/* Zero out port stats since they're actually tracked in per
	 * queue stats; this is only for reporting.
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
			struct idpf_rx_queue_stats *stats;
			struct idpf_queue *rxq;
			unsigned int start;

			if (idpf_is_queue_model_split(vport->rxq_model))
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];

			if (!rxq)
				continue;

			do {
				start = u64_stats_fetch_begin(&rxq->stats_sync);

				stats = &rxq->q_stats.rx;
				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
				hsplit = u64_stats_read(&stats->hsplit_pkts);
				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
				bad_descs = u64_stats_read(&stats->bad_descs);
			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
			u64_stats_add(&pstats->rx_hsplit, hsplit);
			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++) {
			u64 linearize, qbusy, skb_drops, dma_map_errs;
			struct idpf_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			unsigned int start;

			if (!txq)
				continue;

			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				stats = &txq->q_stats.tx;
				linearize = u64_stats_read(&stats->linearize);
				qbusy = u64_stats_read(&stats->q_busy);
				skb_drops = u64_stats_read(&stats->skb_drops);
				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
			} while (u64_stats_fetch_retry(&txq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->tx_linearize, linearize);
			u64_stats_add(&pstats->tx_busy, qbusy);
			u64_stats_add(&pstats->tx_drops, skb_drops);
			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}
}

/**
 * idpf_get_ethtool_stats - report device statistics
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64.
 */
static void idpf_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats __always_unused *stats,
				   u64 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct page_pool_stats pp_stats = { };
	struct idpf_vport *vport;
	unsigned int total = 0;
	unsigned int i, j;
	bool is_splitq;
	u16 qtype;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP) {
		idpf_vport_ctrl_unlock(netdev);

		return;
	}

	rcu_read_lock();

	idpf_collect_queue_stats(vport);
	idpf_add_port_stats(vport, &data);

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		qtype = VIRTCHNL2_QUEUE_TYPE_TX;

		for (j = 0; j < txq_grp->num_txq; j++, total++) {
			struct idpf_queue *txq = txq_grp->txqs[j];

			if (!txq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, txq);
		}
	}

	vport_config = vport->adapter->vport_config[vport->idx];
	/* It is critical we provide a constant number of stats back to
	 * userspace regardless of how many queues are actually in use because
	 * there is no way to inform userspace the size has changed between
	 * ioctl calls. This will fill in any missing stats with zero.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
	total = 0;

	is_splitq = idpf_is_queue_model_split(vport->rxq_model);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		qtype = VIRTCHNL2_QUEUE_TYPE_RX;

		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++, total++) {
			struct idpf_queue *rxq;

			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, rxq);

			/* In splitq mode, don't get page pool stats here since
			 * the pools are attached to the buffer queues.
			 */
			if (is_splitq)
				continue;

			if (rxq)
				page_pool_get_stats(rxq->pp, &pp_stats);
		}
	}

	for (i = 0; i < vport->num_rxq_grp; i++) {
		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
			struct idpf_queue *rxbufq =
				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;

			page_pool_get_stats(rxbufq->pp, &pp_stats);
		}
	}

	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

	page_pool_ethtool_stats_get(data, &pp_stats);

	rcu_read_unlock();

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_find_rxq - find rxq from q index
 * @vport: virtual port associated with the queue
 * @q_num: q index used to find the queue
 *
 * Returns pointer to the RX queue.
 */
static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
{
	int q_grp, q_idx;

	if (!idpf_is_queue_model_split(vport->rxq_model))
		return vport->rxq_grps->singleq.rxqs[q_num];

	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

	return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
}
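
/* Example (illustrative): if IDPF_DFLT_SPLITQ_RXQ_PER_GROUP were 4, q_num 6
 * would resolve to group 1, queue index 2 within that group's rxq_sets.
 */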

/**
 * idpf_find_txq - find txq from q index
 * @vport: virtual port associated with the queue
 * @q_num: q index used to find the queue
 *
 * Returns pointer to the TX queue or, in the split queue model, to the
 * group's completion queue.
 */
static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
{
	int q_grp;

	if (!idpf_is_queue_model_split(vport->txq_model))
		return vport->txqs[q_num];

	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

	return vport->txq_grps[q_grp].complq;
}

/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q: Rx or Tx queue to read the values from
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
				  struct idpf_queue *q)
{
	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
	}
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @q_num: queue number/index to get ITR/INTRL (coalesce) settings for
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q: queue for which ITR values have to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
				 struct idpf_queue *q, bool is_rxq)
{
	u32 use_adaptive_coalesce, coalesce_usecs;
	struct idpf_q_vector *qv = q->q_vector;
	bool is_dim_ena = false;
	u16 itr_val;

	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}
	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive is enabled\n");

		return -EINVAL;
	}

	if (is_dim_ena && use_adaptive_coalesce)
		return 0;

	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(q->vport->netdev,
			   "Invalid coalesce usecs value %d, valid range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);

		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(q->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
						  false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
		}
	}

	/* The switch between static and dynamic ITR will be taken care of
	 * when the next interrupt fires.
	 */
	return 0;
}
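
/* Usage sketch (illustrative, not driver code): `ethtool -C <dev> rx-usecs 51`
 * is rounded down to 50 since the hardware only takes even ITR values, while
 * requesting a new rx-usecs value together with adaptive-rx on fails with
 * -EINVAL per the first check above.
 */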

/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated with the queue that needs updating
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(struct idpf_vport *vport,
			       struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_queue *q;

	q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);

	if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
		return -EINVAL;

	return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int i, err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	for (i = 0; i < vport->num_txq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	err = idpf_set_q_coalesce(vport, ec, q_num, false);
	if (err) {
		idpf_vport_ctrl_unlock(netdev);

		return err;
	}

	err = idpf_set_q_coalesce(vport, ec, q_num, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->msg_enable = data;
}

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 */
static int idpf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (vport->link_up) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = vport->link_speed_mbps;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	idpf_vport_ctrl_unlock(netdev);

	return 0;
}

static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &idpf_ethtool_ops;
}