xref: /linux/drivers/net/ethernet/intel/idpf/idpf_ethtool.c (revision ff7e082ea40d70b7613e8db2cb11e3555ebcc546)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_ptp.h"
6 #include "idpf_virtchnl.h"
7 
8 /**
9  * idpf_get_rx_ring_count - get RX ring count
10  * @netdev: network interface device structure
11  *
12  * Return: number of RX rings.
13  */
14 static u32 idpf_get_rx_ring_count(struct net_device *netdev)
15 {
16 	struct idpf_vport *vport;
17 	u32 num_rxq;
18 
19 	idpf_vport_ctrl_lock(netdev);
20 	vport = idpf_netdev_to_vport(netdev);
21 	num_rxq = vport->num_rxq;
22 	idpf_vport_ctrl_unlock(netdev);
23 
24 	return num_rxq;
25 }
26 
27 /**
28  * idpf_get_rxnfc - command to get RX flow classification rules
29  * @netdev: network interface device structure
30  * @cmd: ethtool rxnfc command
31  * @rule_locs: pointer to store rule locations
32  *
33  * Returns Success if the command is supported.
34  */
35 static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
36 			  u32 *rule_locs)
37 {
38 	struct idpf_netdev_priv *np = netdev_priv(netdev);
39 	struct idpf_vport_user_config_data *user_config;
40 	struct idpf_fsteer_fltr *f;
41 	struct idpf_vport *vport;
42 	unsigned int cnt = 0;
43 	int err = 0;
44 
45 	idpf_vport_ctrl_lock(netdev);
46 	vport = idpf_netdev_to_vport(netdev);
47 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
48 
49 	switch (cmd->cmd) {
50 	case ETHTOOL_GRXCLSRLCNT:
51 		cmd->rule_cnt = user_config->num_fsteer_fltrs;
52 		cmd->data = idpf_fsteer_max_rules(vport);
53 		break;
54 	case ETHTOOL_GRXCLSRULE:
55 		err = -EINVAL;
56 		list_for_each_entry(f, &user_config->flow_steer_list, list)
57 			if (f->loc == cmd->fs.location) {
58 				cmd->fs.ring_cookie = f->q_index;
59 				err = 0;
60 				break;
61 			}
62 		break;
63 	case ETHTOOL_GRXCLSRLALL:
64 		cmd->data = idpf_fsteer_max_rules(vport);
65 		list_for_each_entry(f, &user_config->flow_steer_list, list) {
66 			if (cnt == cmd->rule_cnt) {
67 				err = -EMSGSIZE;
68 				break;
69 			}
70 			rule_locs[cnt] = f->loc;
71 			cnt++;
72 		}
73 		if (!err)
74 			cmd->rule_cnt = user_config->num_fsteer_fltrs;
75 		break;
76 	default:
77 		break;
78 	}
79 
80 	idpf_vport_ctrl_unlock(netdev);
81 
82 	return err;
83 }
84 
85 static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
86 				  struct ethtool_rx_flow_spec *fsp)
87 {
88 	struct iphdr *iph;
89 
90 	hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);
91 
92 	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
93 	iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
94 	iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
95 
96 	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
97 	iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
98 	iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
99 }
100 
101 static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
102 				 struct ethtool_rx_flow_spec *fsp,
103 				 bool v4)
104 {
105 	struct udphdr *udph, *udpm;
106 
107 	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
108 
109 	udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
110 	udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;
111 
112 	if (v4) {
113 		udph->source = fsp->h_u.udp_ip4_spec.psrc;
114 		udph->dest = fsp->h_u.udp_ip4_spec.pdst;
115 		udpm->source = fsp->m_u.udp_ip4_spec.psrc;
116 		udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
117 	} else {
118 		udph->source = fsp->h_u.udp_ip6_spec.psrc;
119 		udph->dest = fsp->h_u.udp_ip6_spec.pdst;
120 		udpm->source = fsp->m_u.udp_ip6_spec.psrc;
121 		udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
122 	}
123 }
124 
125 static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
126 				 struct ethtool_rx_flow_spec *fsp,
127 				 bool v4)
128 {
129 	struct tcphdr *tcph, *tcpm;
130 
131 	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
132 
133 	tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
134 	tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;
135 
136 	if (v4) {
137 		tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
138 		tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
139 		tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
140 		tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
141 	} else {
142 		tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
143 		tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
144 		tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
145 		tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
146 	}
147 }
148 
149 /**
150  * idpf_add_flow_steer - add a Flow Steering filter
151  * @netdev: network interface device structure
152  * @cmd: command to add Flow Steering filter
153  *
154  * Return: 0 on success and negative values for failure
155  */
156 static int idpf_add_flow_steer(struct net_device *netdev,
157 			       struct ethtool_rxnfc *cmd)
158 {
159 	struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
160 	struct idpf_netdev_priv *np = netdev_priv(netdev);
161 	struct idpf_vport_user_config_data *user_config;
162 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
163 	struct virtchnl2_flow_rule_add_del *rule;
164 	struct idpf_vport_config *vport_config;
165 	struct virtchnl2_rule_action_set *acts;
166 	struct virtchnl2_flow_rule_info *info;
167 	struct virtchnl2_proto_hdrs *hdrs;
168 	struct idpf_vport *vport;
169 	u32 flow_type, q_index;
170 	u16 num_rxq;
171 	int err;
172 
173 	vport = idpf_netdev_to_vport(netdev);
174 	vport_config = vport->adapter->vport_config[np->vport_idx];
175 	user_config = &vport_config->user_config;
176 	num_rxq = user_config->num_req_rx_qs;
177 
178 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
179 	if (flow_type != fsp->flow_type)
180 		return -EINVAL;
181 
182 	if (!idpf_sideband_action_ena(vport, fsp) ||
183 	    !idpf_sideband_flow_type_ena(vport, flow_type))
184 		return -EOPNOTSUPP;
185 
186 	if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
187 		return -ENOSPC;
188 
189 	q_index = fsp->ring_cookie;
190 	if (q_index >= num_rxq)
191 		return -EINVAL;
192 
193 	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
194 	if (!rule)
195 		return -ENOMEM;
196 
197 	rule->vport_id = cpu_to_le32(vport->vport_id);
198 	rule->count = cpu_to_le32(1);
199 	info = &rule->rule_info[0];
200 	info->rule_id = cpu_to_le32(fsp->location);
201 
202 	hdrs = &info->rule_cfg.proto_hdrs;
203 	hdrs->tunnel_level = 0;
204 	hdrs->count = cpu_to_le32(2);
205 
206 	acts = &info->rule_cfg.action_set;
207 	acts->count = cpu_to_le32(1);
208 	acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
209 	acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);
210 
211 	switch (flow_type) {
212 	case UDP_V4_FLOW:
213 		idpf_fsteer_fill_ipv4(hdrs, fsp);
214 		idpf_fsteer_fill_udp(hdrs, fsp, true);
215 		break;
216 	case TCP_V4_FLOW:
217 		idpf_fsteer_fill_ipv4(hdrs, fsp);
218 		idpf_fsteer_fill_tcp(hdrs, fsp, true);
219 		break;
220 	default:
221 		err = -EINVAL;
222 		goto out;
223 	}
224 
225 	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
226 					  VIRTCHNL2_OP_ADD_FLOW_RULE);
227 	if (err)
228 		goto out;
229 
230 	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
231 		err = -EIO;
232 		goto out;
233 	}
234 
235 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
236 	if (!fltr) {
237 		err = -ENOMEM;
238 		goto out;
239 	}
240 
241 	fltr->loc = fsp->location;
242 	fltr->q_index = q_index;
243 	list_for_each_entry(f, &user_config->flow_steer_list, list) {
244 		if (f->loc >= fltr->loc)
245 			break;
246 		parent = f;
247 	}
248 
249 	parent ? list_add(&fltr->list, &parent->list) :
250 		 list_add(&fltr->list, &user_config->flow_steer_list);
251 
252 	user_config->num_fsteer_fltrs++;
253 
254 out:
255 	kfree(rule);
256 	return err;
257 }
258 
259 /**
260  * idpf_del_flow_steer - delete a Flow Steering filter
261  * @netdev: network interface device structure
262  * @cmd: command to add Flow Steering filter
263  *
264  * Return: 0 on success and negative values for failure
265  */
266 static int idpf_del_flow_steer(struct net_device *netdev,
267 			       struct ethtool_rxnfc *cmd)
268 {
269 	struct idpf_netdev_priv *np = netdev_priv(netdev);
270 	struct idpf_vport_user_config_data *user_config;
271 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
272 	struct virtchnl2_flow_rule_add_del *rule;
273 	struct idpf_vport_config *vport_config;
274 	struct virtchnl2_flow_rule_info *info;
275 	struct idpf_fsteer_fltr *f, *iter;
276 	struct idpf_vport *vport;
277 	int err;
278 
279 	vport = idpf_netdev_to_vport(netdev);
280 	vport_config = vport->adapter->vport_config[np->vport_idx];
281 	user_config = &vport_config->user_config;
282 
283 	if (!idpf_sideband_action_ena(vport, fsp))
284 		return -EOPNOTSUPP;
285 
286 	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
287 	if (!rule)
288 		return -ENOMEM;
289 
290 	rule->vport_id = cpu_to_le32(vport->vport_id);
291 	rule->count = cpu_to_le32(1);
292 	info = &rule->rule_info[0];
293 	info->rule_id = cpu_to_le32(fsp->location);
294 
295 	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
296 					  VIRTCHNL2_OP_DEL_FLOW_RULE);
297 	if (err)
298 		goto out;
299 
300 	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
301 		err = -EIO;
302 		goto out;
303 	}
304 
305 	list_for_each_entry_safe(f, iter,
306 				 &user_config->flow_steer_list, list) {
307 		if (f->loc == fsp->location) {
308 			list_del(&f->list);
309 			kfree(f);
310 			user_config->num_fsteer_fltrs--;
311 			goto out;
312 		}
313 	}
314 	err = -EINVAL;
315 
316 out:
317 	kfree(rule);
318 	return err;
319 }
320 
321 static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
322 {
323 	int ret = -EOPNOTSUPP;
324 
325 	idpf_vport_ctrl_lock(netdev);
326 	switch (cmd->cmd) {
327 	case ETHTOOL_SRXCLSRLINS:
328 		ret = idpf_add_flow_steer(netdev, cmd);
329 		break;
330 	case ETHTOOL_SRXCLSRLDEL:
331 		ret = idpf_del_flow_steer(netdev, cmd);
332 		break;
333 	default:
334 		break;
335 	}
336 
337 	idpf_vport_ctrl_unlock(netdev);
338 	return ret;
339 }
340 
341 /**
342  * idpf_get_rxfh_key_size - get the RSS hash key size
343  * @netdev: network interface device structure
344  *
345  * Returns the key size on success, error value on failure.
346  */
347 static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
348 {
349 	struct idpf_netdev_priv *np = netdev_priv(netdev);
350 	struct idpf_vport_user_config_data *user_config;
351 
352 	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
353 		return 0;
354 
355 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
356 
357 	return user_config->rss_data.rss_key_size;
358 }
359 
360 /**
361  * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
362  * @netdev: network interface device structure
363  *
364  * Returns the table size on success, error value on failure.
365  */
366 static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
367 {
368 	struct idpf_netdev_priv *np = netdev_priv(netdev);
369 	struct idpf_vport_user_config_data *user_config;
370 
371 	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
372 		return 0;
373 
374 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
375 
376 	return user_config->rss_data.rss_lut_size;
377 }
378 
379 /**
380  * idpf_get_rxfh - get the rx flow hash indirection table
381  * @netdev: network interface device structure
382  * @rxfh: pointer to param struct (indir, key, hfunc)
383  *
384  * Reads the indirection table directly from the hardware. Always returns 0.
385  */
386 static int idpf_get_rxfh(struct net_device *netdev,
387 			 struct ethtool_rxfh_param *rxfh)
388 {
389 	struct idpf_netdev_priv *np = netdev_priv(netdev);
390 	struct idpf_rss_data *rss_data;
391 	struct idpf_adapter *adapter;
392 	int err = 0;
393 	u16 i;
394 
395 	idpf_vport_ctrl_lock(netdev);
396 
397 	adapter = np->adapter;
398 
399 	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
400 		err = -EOPNOTSUPP;
401 		goto unlock_mutex;
402 	}
403 
404 	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
405 	if (!test_bit(IDPF_VPORT_UP, np->state))
406 		goto unlock_mutex;
407 
408 	rxfh->hfunc = ETH_RSS_HASH_TOP;
409 
410 	if (rxfh->key)
411 		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
412 
413 	if (rxfh->indir) {
414 		for (i = 0; i < rss_data->rss_lut_size; i++)
415 			rxfh->indir[i] = rss_data->rss_lut[i];
416 	}
417 
418 unlock_mutex:
419 	idpf_vport_ctrl_unlock(netdev);
420 
421 	return err;
422 }
423 
424 /**
425  * idpf_set_rxfh - set the rx flow hash indirection table
426  * @netdev: network interface device structure
427  * @rxfh: pointer to param struct (indir, key, hfunc)
428  * @extack: extended ACK from the Netlink message
429  *
430  * Returns -EINVAL if the table specifies an invalid queue id, otherwise
431  * returns 0 after programming the table.
432  */
433 static int idpf_set_rxfh(struct net_device *netdev,
434 			 struct ethtool_rxfh_param *rxfh,
435 			 struct netlink_ext_ack *extack)
436 {
437 	struct idpf_netdev_priv *np = netdev_priv(netdev);
438 	struct idpf_rss_data *rss_data;
439 	struct idpf_adapter *adapter;
440 	struct idpf_vport *vport;
441 	int err = 0;
442 	u16 lut;
443 
444 	idpf_vport_ctrl_lock(netdev);
445 	vport = idpf_netdev_to_vport(netdev);
446 
447 	adapter = vport->adapter;
448 
449 	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
450 		err = -EOPNOTSUPP;
451 		goto unlock_mutex;
452 	}
453 
454 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
455 	if (!test_bit(IDPF_VPORT_UP, np->state))
456 		goto unlock_mutex;
457 
458 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
459 	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
460 		err = -EOPNOTSUPP;
461 		goto unlock_mutex;
462 	}
463 
464 	if (rxfh->key)
465 		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
466 
467 	if (rxfh->indir) {
468 		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
469 			rss_data->rss_lut[lut] = rxfh->indir[lut];
470 	}
471 
472 	err = idpf_config_rss(vport);
473 
474 unlock_mutex:
475 	idpf_vport_ctrl_unlock(netdev);
476 
477 	return err;
478 }
479 
480 /**
481  * idpf_get_channels: get the number of channels supported by the device
482  * @netdev: network interface device structure
483  * @ch: channel information structure
484  *
485  * Report maximum of TX and RX. Report one extra channel to match our MailBox
486  * Queue.
487  */
488 static void idpf_get_channels(struct net_device *netdev,
489 			      struct ethtool_channels *ch)
490 {
491 	struct idpf_netdev_priv *np = netdev_priv(netdev);
492 	struct idpf_vport_config *vport_config;
493 	u16 num_txq, num_rxq;
494 	u16 combined;
495 
496 	vport_config = np->adapter->vport_config[np->vport_idx];
497 
498 	num_txq = vport_config->user_config.num_req_tx_qs;
499 	num_rxq = vport_config->user_config.num_req_rx_qs;
500 
501 	combined = min(num_txq, num_rxq);
502 
503 	/* Report maximum channels */
504 	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
505 				 vport_config->max_q.max_rxq);
506 	ch->max_rx = vport_config->max_q.max_rxq;
507 	ch->max_tx = vport_config->max_q.max_txq;
508 
509 	ch->max_other = IDPF_MAX_MBXQ;
510 	ch->other_count = IDPF_MAX_MBXQ;
511 
512 	ch->combined_count = combined;
513 	ch->rx_count = num_rxq - combined;
514 	ch->tx_count = num_txq - combined;
515 }
516 
/**
 * idpf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 * on failure. Applying the new counts triggers a soft reset of the vport;
 * on reset failure the previously requested counts are restored.
 */
static int idpf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct idpf_vport_config *vport_config;
	unsigned int num_req_tx_q;
	unsigned int num_req_rx_q;
	struct idpf_vport *vport;
	u16 num_txq, num_rxq;
	struct device *dev;
	int err = 0;
	u16 idx;

	/* Only combined channels plus at most one dedicated direction are
	 * accepted; dedicated RX and TX at the same time are rejected.
	 */
	if (ch->rx_count && ch->tx_count) {
		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
		return -EINVAL;
	}

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	/* Remember the current requested counts for rollback on failure */
	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify number of queues that exceeds max.
	 * Stack checks max combined_count and max [tx|rx]_count but not the
	 * max combined_count + [tx|rx]_count. These checks should catch that.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}

	/* Nothing to do if the requested counts are already in effect */
	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
		goto unlock_mutex;

	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (err) {
		/* roll back queue change */
		vport_config->user_config.num_req_tx_qs = num_txq;
		vport_config->user_config.num_req_rx_qs = num_rxq;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
590 
/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ringparam; used to report the TCP header/data split state
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kring,
			       struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	/* Report the current header split setting via the kernel ringparam */
	kring->tcp_data_split = idpf_vport_get_hsplit(vport);

	idpf_vport_ctrl_unlock(netdev);
}
620 
/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ringparam; carries the requested TCP header/data split
 *	   setting
 * @ext_ack: extended ACK used to report header split configuration errors
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 * Applying new descriptor counts triggers a soft reset of the vport.
 */
static int idpf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport_user_config_data *config_data;
	u32 new_rx_count, new_tx_count;
	struct idpf_vport *vport;
	int i, err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
			   ring->tx_pending,
			   IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
			   ring->rx_pending,
			   IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	/* Descriptor counts must be multiples of the hardware-required
	 * alignment; round the request up rather than rejecting it.
	 */
	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	/* Nothing to do if neither the counts nor the header split change */
	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count &&
	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "setting TCP data split is not supported");
		err = -EOPNOTSUPP;

		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
705 
/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int sizeof_stat;	/* sizeof() the underlying field */
	int stat_offset;	/* offsetof() the field from the base pointer */
};
733 
/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};

/* Number of stats reported per queue; must stay in sync with the arrays
 * above.
 */
#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

/* Helper macro for defining port-level (per-vport) statistics */
#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport,  _name, _stat)

/* Stats associated with the whole vport */
static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)
789 
790 /**
791  * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
792  * @p: ethtool supplied buffer
793  * @stats: stat definitions array
794  * @size: size of the stats array
795  * @type: stat type
796  * @idx: stat index
797  *
798  * Format and copy the strings described by stats into the buffer pointed at
799  * by p.
800  */
801 static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
802 				     const unsigned int size, const char *type,
803 				     unsigned int idx)
804 {
805 	unsigned int i;
806 
807 	for (i = 0; i < size; i++)
808 		ethtool_sprintf(p, "%s_q-%u_%s",
809 				type, idx, stats[i].stat_string);
810 }
811 
/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer; advanced past the strings written
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
828 
829 /**
830  * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
831  * @p: ethtool buffer
832  * @stats: struct to copy from
833  * @size: size of stats array to copy from
834  */
835 static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
836 				  const unsigned int size)
837 {
838 	unsigned int i;
839 
840 	for (i = 0; i < size; i++)
841 		ethtool_puts(p, stats[i].stat_string);
842 }
843 
844 /**
845  * idpf_get_stat_strings - Get stat strings
846  * @netdev: network interface device structure
847  * @data: buffer for string data
848  *
849  * Builds the statistics string table
850  */
851 static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
852 {
853 	struct idpf_netdev_priv *np = netdev_priv(netdev);
854 	struct idpf_vport_config *vport_config;
855 	unsigned int i;
856 
857 	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
858 			      IDPF_PORT_STATS_LEN);
859 
860 	vport_config = np->adapter->vport_config[np->vport_idx];
861 	/* It's critical that we always report a constant number of strings and
862 	 * that the strings are reported in the same order regardless of how
863 	 * many queues are actually in use.
864 	 */
865 	for (i = 0; i < vport_config->max_q.max_txq; i++)
866 		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
867 				       "tx", i);
868 
869 	for (i = 0; i < vport_config->max_q.max_rxq; i++)
870 		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
871 				       "rx", i);
872 }
873 
874 /**
875  * idpf_get_strings - Get string set
876  * @netdev: network interface device structure
877  * @sset: id of string set
878  * @data: buffer for string data
879  *
880  * Builds string tables for various string sets
881  */
882 static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
883 {
884 	switch (sset) {
885 	case ETH_SS_STATS:
886 		idpf_get_stat_strings(netdev, data);
887 		break;
888 	default:
889 		break;
890 	}
891 }
892 
893 /**
894  * idpf_get_sset_count - Get length of string set
895  * @netdev: network interface device structure
896  * @sset: id of string set
897  *
898  * Reports size of various string tables.
899  */
900 static int idpf_get_sset_count(struct net_device *netdev, int sset)
901 {
902 	struct idpf_netdev_priv *np = netdev_priv(netdev);
903 	struct idpf_vport_config *vport_config;
904 	u16 max_txq, max_rxq;
905 
906 	if (sset != ETH_SS_STATS)
907 		return -EINVAL;
908 
909 	vport_config = np->adapter->vport_config[np->vport_idx];
910 	/* This size reported back here *must* be constant throughout the
911 	 * lifecycle of the netdevice, i.e. we must report the maximum length
912 	 * even for queues that don't technically exist.  This is due to the
913 	 * fact that this userspace API uses three separate ioctl calls to get
914 	 * stats data but has no way to communicate back to userspace when that
915 	 * size has changed, which can typically happen as a result of changing
916 	 * number of queues. If the number/order of stats change in the middle
917 	 * of this call chain it will lead to userspace crashing/accessing bad
918 	 * data through buffer under/overflow.
919 	 */
920 	max_txq = vport_config->max_q.max_txq;
921 	max_rxq = vport_config->max_q.max_rxq;
922 
923 	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
924 	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
925 }
926 
927 /**
928  * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
929  * @data: location to store the stat value
930  * @pstat: old stat pointer to copy from
931  * @stat: the stat definition
932  *
933  * Copies the stat data defined by the pointer and stat structure pair into
934  * the memory supplied as data. If the pointer is null, data will be zero'd.
935  */
936 static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
937 				      const struct idpf_stats *stat)
938 {
939 	char *p;
940 
941 	if (!pstat) {
942 		/* Ensure that the ethtool data buffer is zero'd for any stats
943 		 * which don't have a valid pointer.
944 		 */
945 		*data = 0;
946 		return;
947 	}
948 
949 	p = (char *)pstat + stat->stat_offset;
950 	switch (stat->sizeof_stat) {
951 	case sizeof(u64):
952 		*data = *((u64 *)p);
953 		break;
954 	case sizeof(u32):
955 		*data = *((u32 *)p);
956 		break;
957 	case sizeof(u16):
958 		*data = *((u16 *)p);
959 		break;
960 	case sizeof(u8):
961 		*data = *((u8 *)p);
962 		break;
963 	default:
964 		WARN_ONCE(1, "unexpected stat size for %s",
965 			  stat->stat_string);
966 		*data = 0;
967 	}
968 }
969 
970 /**
971  * idpf_add_queue_stats - copy queue statistics into supplied buffer
972  * @data: ethtool stats buffer
973  * @q: the queue to copy
974  * @type: type of the queue
975  *
976  * Queue statistics must be copied while protected by u64_stats_fetch_begin,
977  * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
978  * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
979  * zero out the queue stat values and update the data pointer. Otherwise
980  * safely copy the stats from the queue into the supplied buffer and update
981  * the data pointer when finished.
982  *
983  * This function expects to be called while under rcu_read_lock().
984  */
985 static void idpf_add_queue_stats(u64 **data, const void *q,
986 				 enum virtchnl2_queue_type type)
987 {
988 	const struct u64_stats_sync *stats_sync;
989 	const struct idpf_stats *stats;
990 	unsigned int start;
991 	unsigned int size;
992 	unsigned int i;
993 
994 	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
995 		size = IDPF_RX_QUEUE_STATS_LEN;
996 		stats = idpf_gstrings_rx_queue_stats;
997 		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
998 	} else {
999 		size = IDPF_TX_QUEUE_STATS_LEN;
1000 		stats = idpf_gstrings_tx_queue_stats;
1001 		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
1002 	}
1003 
1004 	/* To avoid invalid statistics values, ensure that we keep retrying
1005 	 * the copy until we get a consistent value according to
1006 	 * u64_stats_fetch_retry.
1007 	 */
1008 	do {
1009 		start = u64_stats_fetch_begin(stats_sync);
1010 		for (i = 0; i < size; i++)
1011 			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
1012 	} while (u64_stats_fetch_retry(stats_sync, start));
1013 
1014 	/* Once we successfully copy the stats in, update the data pointer */
1015 	*data += size;
1016 }
1017 
1018 /**
1019  * idpf_add_empty_queue_stats - Add stats for a non-existent queue
1020  * @data: pointer to data buffer
1021  * @qtype: type of data queue
1022  *
1023  * We must report a constant length of stats back to userspace regardless of
1024  * how many queues are actually in use because stats collection happens over
1025  * three separate ioctls and there's no way to notify userspace the size
1026  * changed between those calls. This adds empty to data to the stats since we
1027  * don't have a real queue to refer to for this stats slot.
1028  */
1029 static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
1030 {
1031 	unsigned int i;
1032 	int stats_len;
1033 
1034 	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
1035 		stats_len = IDPF_RX_QUEUE_STATS_LEN;
1036 	else
1037 		stats_len = IDPF_TX_QUEUE_STATS_LEN;
1038 
1039 	for (i = 0; i < stats_len; i++)
1040 		(*data)[i] = 0;
1041 	*data += stats_len;
1042 }
1043 
1044 /**
1045  * idpf_add_port_stats - Copy port stats into ethtool buffer
1046  * @vport: virtual port struct
1047  * @data: ethtool buffer to copy into
1048  */
1049 static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
1050 {
1051 	unsigned int size = IDPF_PORT_STATS_LEN;
1052 	unsigned int start;
1053 	unsigned int i;
1054 
1055 	do {
1056 		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
1057 		for (i = 0; i < size; i++)
1058 			idpf_add_one_ethtool_stat(&(*data)[i], vport,
1059 						  &idpf_gstrings_port_stats[i]);
1060 	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
1061 
1062 	*data += size;
1063 }
1064 
1065 /**
1066  * idpf_collect_queue_stats - accumulate various per queue stats
1067  * into port level stats
1068  * @vport: pointer to vport struct
1069  **/
1070 static void idpf_collect_queue_stats(struct idpf_vport *vport)
1071 {
1072 	struct idpf_port_stats *pstats = &vport->port_stats;
1073 	int i, j;
1074 
1075 	/* zero out port stats since they're actually tracked in per
1076 	 * queue stats; this is only for reporting
1077 	 */
1078 	u64_stats_update_begin(&pstats->stats_sync);
1079 	u64_stats_set(&pstats->rx_hw_csum_err, 0);
1080 	u64_stats_set(&pstats->rx_hsplit, 0);
1081 	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
1082 	u64_stats_set(&pstats->rx_bad_descs, 0);
1083 	u64_stats_set(&pstats->tx_linearize, 0);
1084 	u64_stats_set(&pstats->tx_busy, 0);
1085 	u64_stats_set(&pstats->tx_drops, 0);
1086 	u64_stats_set(&pstats->tx_dma_map_errs, 0);
1087 	u64_stats_update_end(&pstats->stats_sync);
1088 
1089 	for (i = 0; i < vport->num_rxq_grp; i++) {
1090 		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
1091 		u16 num_rxq;
1092 
1093 		if (idpf_is_queue_model_split(vport->rxq_model))
1094 			num_rxq = rxq_grp->splitq.num_rxq_sets;
1095 		else
1096 			num_rxq = rxq_grp->singleq.num_rxq;
1097 
1098 		for (j = 0; j < num_rxq; j++) {
1099 			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
1100 			struct idpf_rx_queue_stats *stats;
1101 			struct idpf_rx_queue *rxq;
1102 			unsigned int start;
1103 
1104 			if (idpf_is_queue_model_split(vport->rxq_model))
1105 				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
1106 			else
1107 				rxq = rxq_grp->singleq.rxqs[j];
1108 
1109 			if (!rxq)
1110 				continue;
1111 
1112 			do {
1113 				start = u64_stats_fetch_begin(&rxq->stats_sync);
1114 
1115 				stats = &rxq->q_stats;
1116 				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
1117 				hsplit = u64_stats_read(&stats->hsplit_pkts);
1118 				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
1119 				bad_descs = u64_stats_read(&stats->bad_descs);
1120 			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
1121 
1122 			u64_stats_update_begin(&pstats->stats_sync);
1123 			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
1124 			u64_stats_add(&pstats->rx_hsplit, hsplit);
1125 			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
1126 			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
1127 			u64_stats_update_end(&pstats->stats_sync);
1128 		}
1129 	}
1130 
1131 	for (i = 0; i < vport->num_txq_grp; i++) {
1132 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
1133 
1134 		for (j = 0; j < txq_grp->num_txq; j++) {
1135 			u64 linearize, qbusy, skb_drops, dma_map_errs;
1136 			struct idpf_tx_queue *txq = txq_grp->txqs[j];
1137 			struct idpf_tx_queue_stats *stats;
1138 			unsigned int start;
1139 
1140 			if (!txq)
1141 				continue;
1142 
1143 			do {
1144 				start = u64_stats_fetch_begin(&txq->stats_sync);
1145 
1146 				stats = &txq->q_stats;
1147 				linearize = u64_stats_read(&stats->linearize);
1148 				qbusy = u64_stats_read(&stats->q_busy);
1149 				skb_drops = u64_stats_read(&stats->skb_drops);
1150 				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
1151 			} while (u64_stats_fetch_retry(&txq->stats_sync, start));
1152 
1153 			u64_stats_update_begin(&pstats->stats_sync);
1154 			u64_stats_add(&pstats->tx_linearize, linearize);
1155 			u64_stats_add(&pstats->tx_busy, qbusy);
1156 			u64_stats_add(&pstats->tx_drops, skb_drops);
1157 			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
1158 			u64_stats_update_end(&pstats->stats_sync);
1159 		}
1160 	}
1161 }
1162 
1163 /**
1164  * idpf_get_ethtool_stats - report device statistics
1165  * @netdev: network interface device structure
1166  * @stats: ethtool statistics structure
1167  * @data: pointer to data buffer
1168  *
1169  * All statistics are added to the data buffer as an array of u64.
1170  */
1171 static void idpf_get_ethtool_stats(struct net_device *netdev,
1172 				   struct ethtool_stats __always_unused *stats,
1173 				   u64 *data)
1174 {
1175 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1176 	struct idpf_vport_config *vport_config;
1177 	struct idpf_vport *vport;
1178 	unsigned int total = 0;
1179 	unsigned int i, j;
1180 	bool is_splitq;
1181 	u16 qtype;
1182 
1183 	idpf_vport_ctrl_lock(netdev);
1184 	vport = idpf_netdev_to_vport(netdev);
1185 
1186 	if (!test_bit(IDPF_VPORT_UP, np->state)) {
1187 		idpf_vport_ctrl_unlock(netdev);
1188 
1189 		return;
1190 	}
1191 
1192 	rcu_read_lock();
1193 
1194 	idpf_collect_queue_stats(vport);
1195 	idpf_add_port_stats(vport, &data);
1196 
1197 	for (i = 0; i < vport->num_txq_grp; i++) {
1198 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
1199 
1200 		qtype = VIRTCHNL2_QUEUE_TYPE_TX;
1201 
1202 		for (j = 0; j < txq_grp->num_txq; j++, total++) {
1203 			struct idpf_tx_queue *txq = txq_grp->txqs[j];
1204 
1205 			if (!txq)
1206 				idpf_add_empty_queue_stats(&data, qtype);
1207 			else
1208 				idpf_add_queue_stats(&data, txq, qtype);
1209 		}
1210 	}
1211 
1212 	vport_config = vport->adapter->vport_config[vport->idx];
1213 	/* It is critical we provide a constant number of stats back to
1214 	 * userspace regardless of how many queues are actually in use because
1215 	 * there is no way to inform userspace the size has changed between
1216 	 * ioctl calls. This will fill in any missing stats with zero.
1217 	 */
1218 	for (; total < vport_config->max_q.max_txq; total++)
1219 		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
1220 	total = 0;
1221 
1222 	is_splitq = idpf_is_queue_model_split(vport->rxq_model);
1223 
1224 	for (i = 0; i < vport->num_rxq_grp; i++) {
1225 		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
1226 		u16 num_rxq;
1227 
1228 		qtype = VIRTCHNL2_QUEUE_TYPE_RX;
1229 
1230 		if (is_splitq)
1231 			num_rxq = rxq_grp->splitq.num_rxq_sets;
1232 		else
1233 			num_rxq = rxq_grp->singleq.num_rxq;
1234 
1235 		for (j = 0; j < num_rxq; j++, total++) {
1236 			struct idpf_rx_queue *rxq;
1237 
1238 			if (is_splitq)
1239 				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
1240 			else
1241 				rxq = rxq_grp->singleq.rxqs[j];
1242 			if (!rxq)
1243 				idpf_add_empty_queue_stats(&data, qtype);
1244 			else
1245 				idpf_add_queue_stats(&data, rxq, qtype);
1246 		}
1247 	}
1248 
1249 	for (; total < vport_config->max_q.max_rxq; total++)
1250 		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
1251 
1252 	rcu_read_unlock();
1253 
1254 	idpf_vport_ctrl_unlock(netdev);
1255 }
1256 
1257 /**
1258  * idpf_find_rxq_vec - find rxq vector from q index
1259  * @vport: virtual port associated to queue
1260  * @q_num: q index used to find queue
1261  *
1262  * returns pointer to rx vector
1263  */
1264 struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
1265 					u32 q_num)
1266 {
1267 	int q_grp, q_idx;
1268 
1269 	if (!idpf_is_queue_model_split(vport->rxq_model))
1270 		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
1271 
1272 	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1273 	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1274 
1275 	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
1276 }
1277 
1278 /**
1279  * idpf_find_txq_vec - find txq vector from q index
1280  * @vport: virtual port associated to queue
1281  * @q_num: q index used to find queue
1282  *
1283  * returns pointer to tx vector
1284  */
1285 struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
1286 					u32 q_num)
1287 {
1288 	int q_grp;
1289 
1290 	if (!idpf_is_queue_model_split(vport->txq_model))
1291 		return vport->txqs[q_num]->q_vector;
1292 
1293 	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1294 
1295 	return vport->txq_grps[q_grp].complq->q_vector;
1296 }
1297 
1298 /**
1299  * __idpf_get_q_coalesce - get ITR values for specific queue
1300  * @ec: ethtool structure to fill with driver's coalesce settings
1301  * @q_vector: queue vector corresponding to this queue
1302  * @type: queue type
1303  */
1304 static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
1305 				  const struct idpf_q_vector *q_vector,
1306 				  enum virtchnl2_queue_type type)
1307 {
1308 	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
1309 		ec->use_adaptive_rx_coalesce =
1310 				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
1311 		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
1312 	} else {
1313 		ec->use_adaptive_tx_coalesce =
1314 				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
1315 		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
1316 	}
1317 }
1318 
1319 /**
1320  * idpf_get_q_coalesce - get ITR values for specific queue
1321  * @netdev: pointer to the netdev associated with this query
1322  * @ec: coalesce settings to program the device with
1323  * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1324  *
1325  * Return 0 on success, and negative on failure
1326  */
1327 static int idpf_get_q_coalesce(struct net_device *netdev,
1328 			       struct ethtool_coalesce *ec,
1329 			       u32 q_num)
1330 {
1331 	const struct idpf_netdev_priv *np = netdev_priv(netdev);
1332 	const struct idpf_vport *vport;
1333 	int err = 0;
1334 
1335 	idpf_vport_ctrl_lock(netdev);
1336 	vport = idpf_netdev_to_vport(netdev);
1337 
1338 	if (!test_bit(IDPF_VPORT_UP, np->state))
1339 		goto unlock_mutex;
1340 
1341 	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
1342 		err = -EINVAL;
1343 		goto unlock_mutex;
1344 	}
1345 
1346 	if (q_num < vport->num_rxq)
1347 		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
1348 				      VIRTCHNL2_QUEUE_TYPE_RX);
1349 
1350 	if (q_num < vport->num_txq)
1351 		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
1352 				      VIRTCHNL2_QUEUE_TYPE_TX);
1353 
1354 unlock_mutex:
1355 	idpf_vport_ctrl_unlock(netdev);
1356 
1357 	return err;
1358 }
1359 
1360 /**
1361  * idpf_get_coalesce - get ITR values as requested by user
1362  * @netdev: pointer to the netdev associated with this query
1363  * @ec: coalesce settings to be filled
1364  * @kec: unused
1365  * @extack: unused
1366  *
1367  * Return 0 on success, and negative on failure
1368  */
1369 static int idpf_get_coalesce(struct net_device *netdev,
1370 			     struct ethtool_coalesce *ec,
1371 			     struct kernel_ethtool_coalesce *kec,
1372 			     struct netlink_ext_ack *extack)
1373 {
1374 	/* Return coalesce based on queue number zero */
1375 	return idpf_get_q_coalesce(netdev, ec, 0);
1376 }
1377 
1378 /**
1379  * idpf_get_per_q_coalesce - get ITR values as requested by user
1380  * @netdev: pointer to the netdev associated with this query
1381  * @q_num: queue for which the itr values has to retrieved
1382  * @ec: coalesce settings to be filled
1383  *
1384  * Return 0 on success, and negative on failure
1385  */
1386 
1387 static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
1388 				   struct ethtool_coalesce *ec)
1389 {
1390 	return idpf_get_q_coalesce(netdev, ec, q_num);
1391 }
1392 
1393 /**
1394  * __idpf_set_q_coalesce - set ITR values for specific queue
1395  * @ec: ethtool structure from user to update ITR settings
1396  * @q_coal: per queue coalesce settings
1397  * @qv: queue vector for which itr values has to be set
1398  * @is_rxq: is queue type rx
1399  *
1400  * Returns 0 on success, negative otherwise.
1401  */
1402 static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
1403 				 struct idpf_q_coalesce *q_coal,
1404 				 struct idpf_q_vector *qv, bool is_rxq)
1405 {
1406 	u32 use_adaptive_coalesce, coalesce_usecs;
1407 	bool is_dim_ena = false;
1408 	u16 itr_val;
1409 
1410 	if (is_rxq) {
1411 		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
1412 		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
1413 		coalesce_usecs = ec->rx_coalesce_usecs;
1414 		itr_val = qv->rx_itr_value;
1415 	} else {
1416 		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
1417 		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
1418 		coalesce_usecs = ec->tx_coalesce_usecs;
1419 		itr_val = qv->tx_itr_value;
1420 	}
1421 	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
1422 		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
1423 
1424 		return -EINVAL;
1425 	}
1426 
1427 	if (is_dim_ena && use_adaptive_coalesce)
1428 		return 0;
1429 
1430 	if (coalesce_usecs > IDPF_ITR_MAX) {
1431 		netdev_err(qv->vport->netdev,
1432 			   "Invalid value, %d-usecs range is 0-%d\n",
1433 			   coalesce_usecs, IDPF_ITR_MAX);
1434 
1435 		return -EINVAL;
1436 	}
1437 
1438 	if (coalesce_usecs % 2) {
1439 		coalesce_usecs--;
1440 		netdev_info(qv->vport->netdev,
1441 			    "HW only supports even ITR values, ITR rounded to %d\n",
1442 			    coalesce_usecs);
1443 	}
1444 
1445 	if (is_rxq) {
1446 		qv->rx_itr_value = coalesce_usecs;
1447 		q_coal->rx_coalesce_usecs = coalesce_usecs;
1448 		if (use_adaptive_coalesce) {
1449 			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
1450 			q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
1451 		} else {
1452 			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
1453 			q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
1454 			idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
1455 		}
1456 	} else {
1457 		qv->tx_itr_value = coalesce_usecs;
1458 		q_coal->tx_coalesce_usecs = coalesce_usecs;
1459 		if (use_adaptive_coalesce) {
1460 			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
1461 			q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
1462 		} else {
1463 			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
1464 			q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
1465 			idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
1466 		}
1467 	}
1468 
1469 	/* Update of static/dynamic itr will be taken care when interrupt is
1470 	 * fired
1471 	 */
1472 	return 0;
1473 }
1474 
1475 /**
1476  * idpf_set_q_coalesce - set ITR values for specific queue
1477  * @vport: vport associated to the queue that need updating
1478  * @q_coal: per queue coalesce settings
1479  * @ec: coalesce settings to program the device with
1480  * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1481  * @is_rxq: is queue type rx
1482  *
1483  * Return 0 on success, and negative on failure
1484  */
1485 static int idpf_set_q_coalesce(const struct idpf_vport *vport,
1486 			       struct idpf_q_coalesce *q_coal,
1487 			       const struct ethtool_coalesce *ec,
1488 			       int q_num, bool is_rxq)
1489 {
1490 	struct idpf_q_vector *qv;
1491 
1492 	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
1493 		      idpf_find_txq_vec(vport, q_num);
1494 
1495 	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
1496 		return -EINVAL;
1497 
1498 	return 0;
1499 }
1500 
1501 /**
1502  * idpf_set_coalesce - set ITR values as requested by user
1503  * @netdev: pointer to the netdev associated with this query
1504  * @ec: coalesce settings to program the device with
1505  * @kec: unused
1506  * @extack: unused
1507  *
1508  * Return 0 on success, and negative on failure
1509  */
1510 static int idpf_set_coalesce(struct net_device *netdev,
1511 			     struct ethtool_coalesce *ec,
1512 			     struct kernel_ethtool_coalesce *kec,
1513 			     struct netlink_ext_ack *extack)
1514 {
1515 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1516 	struct idpf_vport_user_config_data *user_config;
1517 	struct idpf_q_coalesce *q_coal;
1518 	struct idpf_vport *vport;
1519 	int i, err = 0;
1520 
1521 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
1522 
1523 	idpf_vport_ctrl_lock(netdev);
1524 	vport = idpf_netdev_to_vport(netdev);
1525 
1526 	if (!test_bit(IDPF_VPORT_UP, np->state))
1527 		goto unlock_mutex;
1528 
1529 	for (i = 0; i < vport->num_txq; i++) {
1530 		q_coal = &user_config->q_coalesce[i];
1531 		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
1532 		if (err)
1533 			goto unlock_mutex;
1534 	}
1535 
1536 	for (i = 0; i < vport->num_rxq; i++) {
1537 		q_coal = &user_config->q_coalesce[i];
1538 		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
1539 		if (err)
1540 			goto unlock_mutex;
1541 	}
1542 
1543 unlock_mutex:
1544 	idpf_vport_ctrl_unlock(netdev);
1545 
1546 	return err;
1547 }
1548 
1549 /**
1550  * idpf_set_per_q_coalesce - set ITR values as requested by user
1551  * @netdev: pointer to the netdev associated with this query
1552  * @q_num: queue for which the itr values has to be set
1553  * @ec: coalesce settings to program the device with
1554  *
1555  * Return 0 on success, and negative on failure
1556  */
1557 static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
1558 				   struct ethtool_coalesce *ec)
1559 {
1560 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1561 	struct idpf_vport_user_config_data *user_config;
1562 	struct idpf_q_coalesce *q_coal;
1563 	struct idpf_vport *vport;
1564 	int err;
1565 
1566 	idpf_vport_ctrl_lock(netdev);
1567 	vport = idpf_netdev_to_vport(netdev);
1568 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
1569 	q_coal = &user_config->q_coalesce[q_num];
1570 
1571 	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
1572 	if (err) {
1573 		idpf_vport_ctrl_unlock(netdev);
1574 
1575 		return err;
1576 	}
1577 
1578 	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
1579 
1580 	idpf_vport_ctrl_unlock(netdev);
1581 
1582 	return err;
1583 }
1584 
1585 /**
1586  * idpf_get_msglevel - Get debug message level
1587  * @netdev: network interface device structure
1588  *
1589  * Returns current debug message level.
1590  */
1591 static u32 idpf_get_msglevel(struct net_device *netdev)
1592 {
1593 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1594 
1595 	return adapter->msg_enable;
1596 }
1597 
1598 /**
1599  * idpf_set_msglevel - Set debug message level
1600  * @netdev: network interface device structure
1601  * @data: message level
1602  *
1603  * Set current debug message level. Higher values cause the driver to
1604  * be noisier.
1605  */
1606 static void idpf_set_msglevel(struct net_device *netdev, u32 data)
1607 {
1608 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1609 
1610 	adapter->msg_enable = data;
1611 }
1612 
1613 /**
1614  * idpf_get_link_ksettings - Get Link Speed and Duplex settings
1615  * @netdev: network interface device structure
1616  * @cmd: ethtool command
1617  *
1618  * Reports speed/duplex settings.
1619  **/
1620 static int idpf_get_link_ksettings(struct net_device *netdev,
1621 				   struct ethtool_link_ksettings *cmd)
1622 {
1623 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1624 
1625 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
1626 	cmd->base.autoneg = AUTONEG_DISABLE;
1627 	cmd->base.port = PORT_NONE;
1628 	if (netif_carrier_ok(netdev)) {
1629 		cmd->base.duplex = DUPLEX_FULL;
1630 		cmd->base.speed = np->link_speed_mbps;
1631 	} else {
1632 		cmd->base.duplex = DUPLEX_UNKNOWN;
1633 		cmd->base.speed = SPEED_UNKNOWN;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
1639 /**
1640  * idpf_get_timestamp_filters - Get the supported timestamping mode
1641  * @vport: Virtual port structure
1642  * @info: ethtool timestamping info structure
1643  *
1644  * Get the Tx/Rx timestamp filters.
1645  */
1646 static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
1647 				       struct kernel_ethtool_ts_info *info)
1648 {
1649 	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
1650 				SOF_TIMESTAMPING_RAW_HARDWARE;
1651 
1652 	info->tx_types = BIT(HWTSTAMP_TX_OFF);
1653 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1654 
1655 	if (!vport->tx_tstamp_caps ||
1656 	    vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
1657 		return;
1658 
1659 	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
1660 				 SOF_TIMESTAMPING_TX_HARDWARE;
1661 
1662 	info->tx_types |= BIT(HWTSTAMP_TX_ON);
1663 }
1664 
1665 /**
1666  * idpf_get_ts_info - Get device PHC association
1667  * @netdev: network interface device structure
1668  * @info: ethtool timestamping info structure
1669  *
1670  * Return: 0 on success, -errno otherwise.
1671  */
1672 static int idpf_get_ts_info(struct net_device *netdev,
1673 			    struct kernel_ethtool_ts_info *info)
1674 {
1675 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1676 	struct idpf_vport *vport;
1677 	int err = 0;
1678 
1679 	if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
1680 		return -EBUSY;
1681 
1682 	vport = idpf_netdev_to_vport(netdev);
1683 
1684 	if (!vport->adapter->ptp) {
1685 		err = -EOPNOTSUPP;
1686 		goto unlock;
1687 	}
1688 
1689 	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
1690 	    vport->adapter->ptp->clock) {
1691 		info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
1692 		idpf_get_timestamp_filters(vport, info);
1693 	} else {
1694 		pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
1695 		err = ethtool_op_get_ts_info(netdev, info);
1696 	}
1697 
1698 unlock:
1699 	mutex_unlock(&np->adapter->vport_ctrl_lock);
1700 
1701 	return err;
1702 }
1703 
1704 /**
1705  * idpf_get_ts_stats - Collect HW tstamping statistics
1706  * @netdev: network interface device structure
1707  * @ts_stats: HW timestamping stats structure
1708  *
1709  * Collect HW timestamping statistics including successfully timestamped
1710  * packets, discarded due to illegal values, flushed during releasing PTP and
1711  * skipped due to lack of the free index.
1712  */
1713 static void idpf_get_ts_stats(struct net_device *netdev,
1714 			      struct ethtool_ts_stats *ts_stats)
1715 {
1716 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1717 	struct idpf_vport *vport;
1718 	unsigned int start;
1719 
1720 	idpf_vport_ctrl_lock(netdev);
1721 	vport = idpf_netdev_to_vport(netdev);
1722 	do {
1723 		start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
1724 		ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
1725 		ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
1726 		ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
1727 	} while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
1728 
1729 	if (!test_bit(IDPF_VPORT_UP, np->state))
1730 		goto exit;
1731 
1732 	for (u16 i = 0; i < vport->num_txq_grp; i++) {
1733 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
1734 
1735 		for (u16 j = 0; j < txq_grp->num_txq; j++) {
1736 			struct idpf_tx_queue *txq = txq_grp->txqs[j];
1737 			struct idpf_tx_queue_stats *stats;
1738 			u64 ts;
1739 
1740 			if (!txq)
1741 				continue;
1742 
1743 			stats = &txq->q_stats;
1744 			do {
1745 				start = u64_stats_fetch_begin(&txq->stats_sync);
1746 
1747 				ts = u64_stats_read(&stats->tstamp_skipped);
1748 			} while (u64_stats_fetch_retry(&txq->stats_sync,
1749 						       start));
1750 
1751 			ts_stats->lost += ts;
1752 		}
1753 	}
1754 
1755 exit:
1756 	idpf_vport_ctrl_unlock(netdev);
1757 }
1758 
/* Ethtool callbacks for idpf netdevs, installed by idpf_set_ethtool_ops().
 * Coalesce support is limited to usecs and adaptive mode; ring params
 * support TCP data split.
 */
static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.set_rxnfc		= idpf_set_rxnfc,
	.get_rx_ring_count	= idpf_get_rx_ring_count,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
	.get_ts_info		= idpf_get_ts_info,
	.get_ts_stats		= idpf_get_ts_stats,
};
1788 
1789 /**
1790  * idpf_set_ethtool_ops - Initialize ethtool ops struct
1791  * @netdev: network interface device structure
1792  *
1793  * Sets ethtool ops struct in our netdev so that ethtool can call
1794  * our functions.
1795  */
1796 void idpf_set_ethtool_ops(struct net_device *netdev)
1797 {
1798 	netdev->ethtool_ops = &idpf_ethtool_ops;
1799 }
1800