// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"

/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Return: 0 if the command is supported, negative error code otherwise.
 */
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_fsteer_fltr *f;
	struct idpf_vport *vport;
	unsigned int cnt = 0;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vport->num_rxq;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = user_config->num_fsteer_fltrs;
		cmd->data = idpf_fsteer_max_rules(vport);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = -EINVAL;
		list_for_each_entry(f, &user_config->flow_steer_list, list)
			if (f->loc == cmd->fs.location) {
				cmd->fs.ring_cookie = f->q_index;
				err = 0;
				break;
			}
		break;
	case ETHTOOL_GRXCLSRLALL:
		cmd->data = idpf_fsteer_max_rules(vport);
		list_for_each_entry(f, &user_config->flow_steer_list, list) {
			if (cnt == cmd->rule_cnt) {
				err = -EMSGSIZE;
				break;
			}
			rule_locs[cnt] = f->loc;
			cnt++;
		}
		if (!err)
			cmd->rule_cnt = user_config->num_fsteer_fltrs;
		break;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

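/**
 * idpf_fsteer_fill_ipv4 - fill IPv4 protocol header spec and mask
 * @hdrs: virtchnl protocol headers to fill
 * @fsp: ethtool Rx flow spec containing the IPv4 addresses
 *
 * Copy the source/destination IPv4 addresses and their masks from the
 * ethtool flow spec into the first protocol header slot.
 */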
static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	struct iphdr *iph;

	hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);

	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
	iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;

	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
	iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
}

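/**
 * idpf_fsteer_fill_udp - fill UDP protocol header spec and mask
 * @hdrs: virtchnl protocol headers to fill
 * @fsp: ethtool Rx flow spec containing the port numbers
 * @v4: true for an IPv4 flow, false for IPv6
 *
 * Copy the source/destination UDP ports and their masks from the ethtool
 * flow spec into the second protocol header slot.
 */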
static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct udphdr *udph, *udpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);

	udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
	udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		udph->source = fsp->h_u.udp_ip4_spec.psrc;
		udph->dest = fsp->h_u.udp_ip4_spec.pdst;
		udpm->source = fsp->m_u.udp_ip4_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
	} else {
		udph->source = fsp->h_u.udp_ip6_spec.psrc;
		udph->dest = fsp->h_u.udp_ip6_spec.pdst;
		udpm->source = fsp->m_u.udp_ip6_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
	}
}

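/**
 * idpf_fsteer_fill_tcp - fill TCP protocol header spec and mask
 * @hdrs: virtchnl protocol headers to fill
 * @fsp: ethtool Rx flow spec containing the port numbers
 * @v4: true for an IPv4 flow, false for IPv6
 *
 * Copy the source/destination TCP ports and their masks from the ethtool
 * flow spec into the second protocol header slot.
 */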
static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct tcphdr *tcph, *tcpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);

	tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
	tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
	} else {
		tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
	}
}

/**
 * idpf_add_flow_steer - add a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to add Flow Steering filter
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_add_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_rule_action_set *acts;
	struct virtchnl2_flow_rule_info *info;
	struct virtchnl2_proto_hdrs *hdrs;
	struct idpf_vport *vport;
	u32 flow_type, q_index;
	u16 num_rxq;
	int err;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;
	num_rxq = user_config->num_req_rx_qs;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	if (flow_type != fsp->flow_type)
		return -EINVAL;

	if (!idpf_sideband_action_ena(vport, fsp) ||
	    !idpf_sideband_flow_type_ena(vport, flow_type))
		return -EOPNOTSUPP;

	if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
		return -ENOSPC;

	q_index = fsp->ring_cookie;
	if (q_index >= num_rxq)
		return -EINVAL;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	info->rule_id = cpu_to_le32(fsp->location);

	hdrs = &info->rule_cfg.proto_hdrs;
	hdrs->tunnel_level = 0;
	hdrs->count = cpu_to_le32(2);

	acts = &info->rule_cfg.action_set;
	acts->count = cpu_to_le32(1);
	acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);

	switch (flow_type) {
	case UDP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_udp(hdrs, fsp, true);
		break;
	case TCP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_tcp(hdrs, fsp, true);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_ADD_FLOW_RULE);
	if (err)
		goto out;

	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr) {
		err = -ENOMEM;
		goto out;
	}

	fltr->loc = fsp->location;
	fltr->q_index = q_index;
	list_for_each_entry(f, &user_config->flow_steer_list, list) {
		if (f->loc >= fltr->loc)
			break;
		parent = f;
	}
	if (parent)
		list_add(&fltr->list, &parent->list);
	else
		list_add(&fltr->list, &user_config->flow_steer_list);

	user_config->num_fsteer_fltrs++;

out:
	kfree(rule);
	return err;
}

/**
 * idpf_del_flow_steer - delete a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to delete Flow Steering filter
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_del_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_flow_rule_info *info;
	struct idpf_fsteer_fltr *f, *iter;
	struct idpf_vport *vport;
	int err;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;

	if (!idpf_sideband_action_ena(vport, fsp))
		return -EOPNOTSUPP;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	info->rule_id = cpu_to_le32(fsp->location);

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_DEL_FLOW_RULE);
	if (err)
		goto out;

	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	list_for_each_entry_safe(f, iter,
				 &user_config->flow_steer_list, list) {
		if (f->loc == fsp->location) {
			list_del(&f->list);
			kfree(f);
			user_config->num_fsteer_fltrs--;
			goto out;
		}
	}
	err = -EINVAL;

out:
	kfree(rule);
	return err;
}

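/**
 * idpf_set_rxnfc - command to set Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Return: 0 on success and negative values for failure
 */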
static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	idpf_vport_ctrl_lock(netdev);
	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = idpf_add_flow_steer(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = idpf_del_flow_steer(netdev, cmd);
		break;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);
	return ret;
}

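/* Flow steering rules are managed from userspace through the rxnfc ioctls,
 * e.g. with ethtool (interface name, port and locations illustrative):
 *
 *   # steer TCP/IPv4 packets with destination port 80 to Rx queue 2,
 *   # storing the rule at location 5 (ETHTOOL_SRXCLSRLINS)
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 5
 *
 *   # remove that rule again (ETHTOOL_SRXCLSRLDEL)
 *   ethtool -N eth0 delete 5
 */
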
/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size, or 0 if RSS is not supported on this device.
 */
static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_key_size;
}

/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size, or 0 if RSS is not supported on this device.
 */
static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_lut_size;
}

/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table and hash key from the driver's cached copy.
 *
 * Return: 0 on success, -EOPNOTSUPP if RSS is not supported.
 */
static int idpf_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	int err = 0;
	u16 i;

	idpf_vport_ctrl_lock(netdev);

	adapter = np->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rxfh->indir[i] = rss_data->rss_lut[i];
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Updates the cached hash key and indirection table and programs them into
 * the device.
 *
 * Return: 0 on success, -EOPNOTSUPP if RSS or the requested hash function is
 * not supported, or an error from programming the device.
 */
static int idpf_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;
	u16 lut;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (rxfh->key)
		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
			rss_data->rss_lut[lut] = rxfh->indir[lut];
	}

	err = idpf_config_rss(vport);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

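/* The RSS configuration above is driven from userspace with the rxfh ioctls,
 * e.g. via ethtool (interface name and queue count illustrative):
 *
 *   ethtool -x eth0          # show hash key and indirection table
 *   ethtool -X eth0 equal 4  # spread the table evenly over 4 Rx queues
 */
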
/**
 * idpf_get_channels: get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report the maximum and current TX and RX channel counts. One extra "other"
 * channel is reported to account for the mailbox queue.
 */
static void idpf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 num_txq, num_rxq;
	u16 combined;

	vport_config = np->adapter->vport_config[np->vport_idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* Report maximum channels */
	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
				 vport_config->max_q.max_rxq);
	ch->max_rx = vport_config->max_q.max_rxq;
	ch->max_tx = vport_config->max_q.max_txq;

	ch->max_other = IDPF_MAX_MBXQ;
	ch->other_count = IDPF_MAX_MBXQ;

	ch->combined_count = combined;
	ch->rx_count = num_rxq - combined;
	ch->tx_count = num_txq - combined;
}

/**
 * idpf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with the Control Plane (CP). Returns 0
 * on success, negative on failure.
 */
static int idpf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct idpf_vport_config *vport_config;
	unsigned int num_req_tx_q;
	unsigned int num_req_rx_q;
	struct idpf_vport *vport;
	u16 num_txq, num_rxq;
	struct device *dev;
	int err = 0;
	u16 idx;

	if (ch->rx_count && ch->tx_count) {
		netdev_err(netdev, "Dedicated RX and TX channels cannot be used simultaneously\n");
		return -EINVAL;
	}

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify number of queues that exceeds max.
	 * Stack checks max combined_count and max [tx|rx]_count but not the
	 * max combined_count + [tx|rx]_count. These checks should catch that.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
		goto unlock_mutex;

	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (err) {
		/* roll back queue change */
		vport_config->user_config.num_req_tx_qs = num_txq;
		vport_config->user_config.num_req_rx_qs = num_rxq;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

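/* Channel counts are queried and changed from userspace with, e.g., ethtool
 * (interface name and counts illustrative); changing them triggers a soft
 * reset to renegotiate queues with the control plane:
 *
 *   ethtool -l eth0             # idpf_get_channels
 *   ethtool -L eth0 combined 8  # idpf_set_channels
 */
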
/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: ethtool extended ringparam structure (reports tcp_data_split)
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kring,
			       struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	kring->tcp_data_split = idpf_vport_get_hsplit(vport);

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: ethtool extended ringparam structure (carries tcp_data_split)
 * @ext_ack: extended ACK for reporting errors back to userspace
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
static int idpf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport_user_config_data *config_data;
	u32 new_rx_count, new_tx_count;
	struct idpf_vport *vport;
	int i, err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
			   ring->tx_pending,
			   IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
			   ring->rx_pending,
			   IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count &&
	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "setting TCP data split is not supported");
		err = -EOPNOTSUPP;

		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

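/* Ring sizes and the TCP data split setting are changed from userspace with,
 * e.g., ethtool (interface name and counts illustrative); descriptor counts
 * are rounded up to the required multiples as described above:
 *
 *   ethtool -G eth0 rx 1024 tx 1024
 *   ethtool -G eth0 tcp-data-split on
 */
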
/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport, _name, _stat)

static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
				  const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_puts(p, stats[i].stat_string);
}

/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	unsigned int i;

	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
			      IDPF_PORT_STATS_LEN);

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* It's critical that we always report a constant number of strings and
	 * that the strings are reported in the same order regardless of how
	 * many queues are actually in use.
	 */
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);
}

/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		idpf_get_stat_strings(netdev, data);
		break;
	default:
		break;
	}
}

/**
 * idpf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports size of various string tables.
 */
static int idpf_get_sset_count(struct net_device *netdev, int sset)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 max_txq, max_rxq;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* This size reported back here *must* be constant throughout the
	 * lifecycle of the netdevice, i.e. we must report the maximum length
	 * even for queues that don't technically exist. This is due to the
	 * fact that this userspace API uses three separate ioctl calls to get
	 * stats data but has no way to communicate back to userspace when that
	 * size has changed, which can typically happen as a result of changing
	 * number of queues. If the number/order of stats change in the middle
	 * of this call chain it will lead to userspace crashing/accessing bad
	 * data through buffer under/overflow.
	 */
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
}

/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 * @type: type of the queue
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 * zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, const void *q,
				 enum virtchnl2_queue_type type)
{
	const struct u64_stats_sync *stats_sync;
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		size = IDPF_RX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_rx_queue_stats;
		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
	} else {
		size = IDPF_TX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_tx_queue_stats;
		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds zeroed entries to the data buffer
 * since we don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = IDPF_RX_QUEUE_STATS_LEN;
	else
		stats_len = IDPF_TX_QUEUE_STATS_LEN;

	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;
	*data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}

/**
 * idpf_collect_queue_stats - accumulate various per queue stats into port level stats
 * @vport: pointer to vport struct
 */
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
	struct idpf_port_stats *pstats = &vport->port_stats;
	int i, j;

	/* zero out port stats since they're actually tracked in per
	 * queue stats; this is only for reporting
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
			struct idpf_rx_queue_stats *stats;
			struct idpf_rx_queue *rxq;
			unsigned int start;

			if (idpf_is_queue_model_split(vport->rxq_model))
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];

			if (!rxq)
				continue;

			do {
				start = u64_stats_fetch_begin(&rxq->stats_sync);

				stats = &rxq->q_stats;
				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
				hsplit = u64_stats_read(&stats->hsplit_pkts);
				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
				bad_descs = u64_stats_read(&stats->bad_descs);
			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
			u64_stats_add(&pstats->rx_hsplit, hsplit);
			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++) {
			u64 linearize, qbusy, skb_drops, dma_map_errs;
			struct idpf_tx_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			unsigned int start;

			if (!txq)
				continue;

			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				stats = &txq->q_stats;
				linearize = u64_stats_read(&stats->linearize);
				qbusy = u64_stats_read(&stats->q_busy);
				skb_drops = u64_stats_read(&stats->skb_drops);
				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
			} while (u64_stats_fetch_retry(&txq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->tx_linearize, linearize);
			u64_stats_add(&pstats->tx_busy, qbusy);
			u64_stats_add(&pstats->tx_drops, skb_drops);
			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}
}

/**
 * idpf_get_ethtool_stats - report device statistics
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64.
 */
static void idpf_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats __always_unused *stats,
				   u64 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct idpf_vport *vport;
	unsigned int total = 0;
	unsigned int i, j;
	bool is_splitq;
	u16 qtype;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP) {
		idpf_vport_ctrl_unlock(netdev);

		return;
	}

	rcu_read_lock();

	idpf_collect_queue_stats(vport);
	idpf_add_port_stats(vport, &data);

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		qtype = VIRTCHNL2_QUEUE_TYPE_TX;

		for (j = 0; j < txq_grp->num_txq; j++, total++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];

			if (!txq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, txq, qtype);
		}
	}

	vport_config = vport->adapter->vport_config[vport->idx];
	/* It is critical we provide a constant number of stats back to
	 * userspace regardless of how many queues are actually in use because
	 * there is no way to inform userspace the size has changed between
	 * ioctl calls. This will fill in any missing stats with zero.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
	total = 0;

	is_splitq = idpf_is_queue_model_split(vport->rxq_model);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		qtype = VIRTCHNL2_QUEUE_TYPE_RX;

		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++, total++) {
			struct idpf_rx_queue *rxq;

			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, rxq, qtype);
		}
	}

	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

	rcu_read_unlock();

	idpf_vport_ctrl_unlock(netdev);
}

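/* All of the stats assembled above are read from userspace with a plain
 * "ethtool -S <ifname>", which drives idpf_get_sset_count(),
 * idpf_get_strings() and idpf_get_ethtool_stats() in turn.
 */
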
/**
 * idpf_find_rxq_vec - find rxq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to rx vector
 */
static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
					       int q_num)
{
	int q_grp, q_idx;

	if (!idpf_is_queue_model_split(vport->rxq_model))
		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}

/**
 * idpf_find_txq_vec - find txq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to tx vector
 */
static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
					       int q_num)
{
	int q_grp;

	if (!idpf_is_queue_model_split(vport->txq_model))
		return vport->txqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

	return vport->txq_grps[q_grp].complq->q_vector;
}

/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_vector: queue vector corresponding to this queue
 * @type: queue type
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
				  const struct idpf_q_vector *q_vector,
				  enum virtchnl2_queue_type type)
{
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
	}
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to fill with the queue's current values
 * @q_num: queue number/index to retrieve the ITR/INTRL (coalesce) settings for
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	const struct idpf_netdev_priv *np = netdev_priv(netdev);
	const struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_RX);

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_TX);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values have to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q_coal: per queue coalesce settings
 * @qv: queue vector for which itr values have to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
				 struct idpf_q_coalesce *q_coal,
				 struct idpf_q_vector *qv, bool is_rxq)
{
	u32 use_adaptive_coalesce, coalesce_usecs;
	bool is_dim_ena = false;
	u16 itr_val;

	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}
	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");

		return -EINVAL;
	}

	if (is_dim_ena && use_adaptive_coalesce)
		return 0;

	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(qv->vport->netdev,
			   "Invalid value, %d-usecs range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);

		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(qv->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		q_coal->rx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		q_coal->tx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
		}
	}

	/* The switch between static and dynamic ITR will be taken care of when
	 * the next interrupt fires.
	 */
	return 0;
}

/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that needs updating
 * @q_coal: per queue coalesce settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
			       struct idpf_q_coalesce *q_coal,
			       const struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_q_vector *qv;

	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
		      idpf_find_txq_vec(vport, q_num);

	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
		return -EINVAL;

	return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int i, err = 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	for (i = 0; i < vport->num_txq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

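/* Interrupt coalescing is configured from userspace with, e.g., ethtool
 * (interface name and values illustrative); fixed usec values and adaptive
 * (DIM) mode are mutually exclusive, and odd usec values are rounded down:
 *
 *   ethtool -C eth0 adaptive-rx off rx-usecs 50
 *   ethtool -C eth0 adaptive-tx on
 */
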
/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values have to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
	q_coal = &user_config->q_coalesce[q_num];

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
	if (err) {
		idpf_vport_ctrl_unlock(netdev);

		return err;
	}

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

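/* Per-queue coalescing uses the queue_mask form of ethtool (interface name,
 * mask and value illustrative):
 *
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 64
 */
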
/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->msg_enable = data;
}

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 */
static int idpf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (netif_carrier_ok(netdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = np->link_speed_mbps;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	return 0;
}

/**
 * idpf_get_timestamp_filters - Get the supported timestamping mode
 * @vport: Virtual port structure
 * @info: ethtool timestamping info structure
 *
 * Get the Tx/Rx timestamp filters.
 */
static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
				       struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	if (!vport->tx_tstamp_caps ||
	    vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
		return;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
				 SOF_TIMESTAMPING_TX_HARDWARE;

	info->tx_types |= BIT(HWTSTAMP_TX_ON);
}

/**
 * idpf_get_ts_info - Get device PHC association
 * @netdev: network interface device structure
 * @info: ethtool timestamping info structure
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *info)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int err = 0;

	if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
		return -EBUSY;

	vport = idpf_netdev_to_vport(netdev);

	if (!vport->adapter->ptp) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
	    vport->adapter->ptp->clock) {
		info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
		idpf_get_timestamp_filters(vport, info);
	} else {
		pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
		err = ethtool_op_get_ts_info(netdev, info);
	}

unlock:
	mutex_unlock(&np->adapter->vport_ctrl_lock);

	return err;
}

static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.set_rxnfc		= idpf_set_rxnfc,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
	.get_ts_info		= idpf_get_ts_info,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &idpf_ethtool_ops;
}