// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"

/**
 * idpf_get_rx_ring_count - get RX ring count
 * @netdev: network interface device structure
 *
 * Return: number of RX rings.
 */
static u32 idpf_get_rx_ring_count(struct net_device *netdev)
{
	struct idpf_vport *vport;
	u32 num_rxq;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	num_rxq = vport->num_rxq;
	idpf_vport_ctrl_unlock(netdev);

	return num_rxq;
}

/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Return: 0 if the command is supported, negative error code otherwise.
 */
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_vport_config *vport_config;
	struct idpf_fsteer_fltr *f;
	struct idpf_vport *vport;
	unsigned int cnt = 0;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	vport_config = np->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = user_config->num_fsteer_fltrs;
		cmd->data = idpf_fsteer_max_rules(vport);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = -ENOENT;
		spin_lock_bh(&vport_config->flow_steer_list_lock);
		list_for_each_entry(f, &user_config->flow_steer_list, list)
			if (f->fs.location == cmd->fs.location) {
				/* Avoid infoleak from padding: zero first,
				 * then assign fields
				 */
				memset(&cmd->fs, 0, sizeof(cmd->fs));
				cmd->fs = f->fs;
				err = 0;
				break;
			}
		spin_unlock_bh(&vport_config->flow_steer_list_lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		cmd->data = idpf_fsteer_max_rules(vport);
		spin_lock_bh(&vport_config->flow_steer_list_lock);
		list_for_each_entry(f, &user_config->flow_steer_list, list) {
			if (cnt == cmd->rule_cnt) {
				err = -EMSGSIZE;
				break;
			}
			rule_locs[cnt] = f->fs.location;
			cnt++;
		}
		if (!err)
			cmd->rule_cnt = user_config->num_fsteer_fltrs;
		spin_unlock_bh(&vport_config->flow_steer_list_lock);
		break;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);

	return err;
}
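
/* Usage sketch (not driver code): the handler above backs the standard
 * ethtool rxnfc reads, e.g.:
 *	ethtool -n <iface>		# list rule count/locations
 *	ethtool -n <iface> rule 5	# dump the rule at location 5
 * <iface> is a placeholder interface name.
 */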

/**
 * idpf_fsteer_fill_ipv4 - fill IPv4 protocol header spec and mask
 * @hdrs: virtchnl2 protocol headers to fill
 * @fsp: flow spec from ethtool
 */
static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	struct iphdr *iph;

	hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);

	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
	iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;

	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
	iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
}

/**
 * idpf_fsteer_fill_udp - fill UDP protocol header spec and mask
 * @hdrs: virtchnl2 protocol headers to fill
 * @fsp: flow spec from ethtool
 * @v4: true for an IPv4 flow spec, false for IPv6
 */
static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct udphdr *udph, *udpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);

	udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
	udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		udph->source = fsp->h_u.udp_ip4_spec.psrc;
		udph->dest = fsp->h_u.udp_ip4_spec.pdst;
		udpm->source = fsp->m_u.udp_ip4_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
	} else {
		udph->source = fsp->h_u.udp_ip6_spec.psrc;
		udph->dest = fsp->h_u.udp_ip6_spec.pdst;
		udpm->source = fsp->m_u.udp_ip6_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
	}
}

/**
 * idpf_fsteer_fill_tcp - fill TCP protocol header spec and mask
 * @hdrs: virtchnl2 protocol headers to fill
 * @fsp: flow spec from ethtool
 * @v4: true for an IPv4 flow spec, false for IPv6
 */
static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct tcphdr *tcph, *tcpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);

	tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
	tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
	} else {
		tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
	}
}
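
/* The helpers above populate a two-entry proto_hdrs array: index 0 holds the
 * L3 (IPv4) spec/mask and index 1 the L4 (TCP/UDP) spec/mask, which is why
 * idpf_add_flow_steer() below sets hdrs->count to 2.
 */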

/**
 * idpf_add_flow_steer - add a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to add Flow Steering filter
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_add_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_rule_action_set *acts;
	struct virtchnl2_flow_rule_info *info;
	struct virtchnl2_proto_hdrs *hdrs;
	struct idpf_vport *vport;
	u32 flow_type, q_index;
	u16 num_rxq;
	int err = 0;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;
	num_rxq = user_config->num_req_rx_qs;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	if (flow_type != fsp->flow_type)
		return -EINVAL;

	if (!idpf_sideband_action_ena(vport, fsp) ||
	    !idpf_sideband_flow_type_ena(vport, flow_type))
		return -EOPNOTSUPP;

	if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
		return -ENOSPC;

	q_index = fsp->ring_cookie;
	if (q_index >= num_rxq)
		return -EINVAL;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr) {
		err = -ENOMEM;
		goto out_free_rule;
	}

	/* detect duplicate entry and reject before adding rules */
	spin_lock_bh(&vport_config->flow_steer_list_lock);
	list_for_each_entry(f, &user_config->flow_steer_list, list) {
		if (f->fs.location == fsp->location) {
			err = -EEXIST;
			break;
		}

		if (f->fs.location > fsp->location)
			break;
		parent = f;
	}
	spin_unlock_bh(&vport_config->flow_steer_list_lock);

	if (err)
		goto out;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	info->rule_id = cpu_to_le32(fsp->location);

	hdrs = &info->rule_cfg.proto_hdrs;
	hdrs->tunnel_level = 0;
	hdrs->count = cpu_to_le32(2);

	acts = &info->rule_cfg.action_set;
	acts->count = cpu_to_le32(1);
	acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);

	switch (flow_type) {
	case UDP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_udp(hdrs, fsp, true);
		break;
	case TCP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_tcp(hdrs, fsp, true);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_ADD_FLOW_RULE);
	if (err)
		goto out;

	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	/* Save a copy of the user's flow spec so ethtool can later retrieve it */
	fltr->fs = *fsp;

	spin_lock_bh(&vport_config->flow_steer_list_lock);
	if (parent)
		list_add(&fltr->list, &parent->list);
	else
		list_add(&fltr->list, &user_config->flow_steer_list);

	user_config->num_fsteer_fltrs++;
	spin_unlock_bh(&vport_config->flow_steer_list_lock);
	goto out_free_rule;

out:
	kfree(fltr);
out_free_rule:
	kfree(rule);
	return err;
}
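
/* Usage sketch (not driver code): a rule of the shape handled above can be
 * installed with, e.g.:
 *	ethtool -N <iface> flow-type tcp4 src-ip 192.168.0.1 dst-port 80 \
 *		action 2 loc 5
 * which steers matching packets to RX queue 2 as rule location 5. <iface>
 * and the match values are placeholders.
 */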

/**
 * idpf_del_flow_steer - delete a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to delete Flow Steering filter
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_del_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_flow_rule_info *info;
	struct idpf_fsteer_fltr *f, *iter;
	struct idpf_vport *vport;
	int err;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;

	if (!idpf_sideband_action_ena(vport, fsp))
		return -EOPNOTSUPP;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	info->rule_id = cpu_to_le32(fsp->location);

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_DEL_FLOW_RULE);
	if (err)
		goto out;

	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	spin_lock_bh(&vport_config->flow_steer_list_lock);
	list_for_each_entry_safe(f, iter,
				 &user_config->flow_steer_list, list) {
		if (f->fs.location == fsp->location) {
			list_del(&f->list);
			kfree(f);
			user_config->num_fsteer_fltrs--;
			goto out_unlock;
		}
	}
	err = -ENOENT;

out_unlock:
	spin_unlock_bh(&vport_config->flow_steer_list_lock);
out:
	kfree(rule);
	return err;
}
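
/* Usage sketch (not driver code): the rule installed above can be removed by
 * its location, e.g.:
 *	ethtool -N <iface> delete 5
 */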

/**
 * idpf_set_rxnfc - command to set RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Return: 0 on success, negative on failure.
 */
static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	idpf_vport_ctrl_lock(netdev);
	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = idpf_add_flow_steer(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = idpf_del_flow_steer(netdev, cmd);
		break;
	default:
		break;
	}

	idpf_vport_ctrl_unlock(netdev);
	return ret;
}

/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Return: the key size on success, 0 if RSS is not supported.
 */
static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_key_size;
}

/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Return: the table size on success, 0 if RSS is not supported.
 */
static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;

	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_lut_size;
}

/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * RSS LUT and key information are read from the driver's cached copy. When
 * RX hashing is disabled, the LUT is reported as all zeros.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	bool rxhash_ena;
	int err = 0;
	u16 i;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = np->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;

	rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rxfh->indir[i] = rxhash_ena ? rss_data->rss_lut[i] : 0;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Return: 0 after updating the cached LUT/key and, if the vport is up,
 * programming the device; -EOPNOTSUPP if RSS is not supported or the hash
 * function is not Toeplitz.
 */
static int idpf_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_rss_data *rss_data;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;
	u16 lut;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (rxfh->key)
		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
			rss_data->rss_lut[lut] = rxfh->indir[lut];
	}

	if (test_bit(IDPF_VPORT_UP, np->state))
		err = idpf_config_rss(vport);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
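
/* Usage sketch (not driver code): the RSS LUT and key handled above can be
 * inspected and changed with, e.g.:
 *	ethtool -x <iface>		# show LUT/key
 *	ethtool -X <iface> equal 4	# spread flows over the first 4 queues
 *	ethtool -X <iface> hkey <aa:bb:...>
 * Only the Toeplitz hash function is accepted.
 */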

/**
 * idpf_get_channels - get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report maximum of TX and RX. Report one extra channel to match our MailBox
 * Queue.
 */
static void idpf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 num_txq, num_rxq;
	u16 combined;

	vport_config = np->adapter->vport_config[np->vport_idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* Report maximum channels */
	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
				 vport_config->max_q.max_rxq);
	ch->max_rx = vport_config->max_q.max_rxq;
	ch->max_tx = vport_config->max_q.max_txq;

	ch->max_other = IDPF_MAX_MBXQ;
	ch->other_count = IDPF_MAX_MBXQ;

	ch->combined_count = combined;
	ch->rx_count = num_rxq - combined;
	ch->tx_count = num_txq - combined;
}

/**
 * idpf_set_channels - set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 * on failure.
 */
static int idpf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct idpf_vport_config *vport_config;
	unsigned int num_req_tx_q;
	unsigned int num_req_rx_q;
	struct idpf_vport *vport;
	u16 num_txq, num_rxq;
	struct device *dev;
	int err = 0;
	u16 idx;

	if (ch->rx_count && ch->tx_count) {
		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
		return -EINVAL;
	}

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify number of queues that exceeds max.
	 * Stack checks max combined_count and max [tx|rx]_count but not the
	 * max combined_count + [tx|rx]_count. These checks should catch that.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
		goto unlock_mutex;

	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (err) {
		/* roll back queue change */
		vport_config->user_config.num_req_tx_qs = num_txq;
		vport_config->user_config.num_req_rx_qs = num_rxq;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
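
/* Usage sketch (not driver code): channel counts are changed with, e.g.:
 *	ethtool -L <iface> combined 8
 * Combined channels may be supplemented with extra RX *or* TX channels, but
 * not both at once, as enforced above.
 */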

/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ethtool ringparam structure, reports the TCP data split
 *	   setting
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kring,
			       struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	kring->tcp_data_split = idpf_vport_get_hsplit(vport);

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel ethtool ringparam structure, carries the TCP data split
 *	   setting
 * @ext_ack: extended ACK for error reporting
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
static int idpf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *ext_ack)
{
	struct idpf_vport_user_config_data *config_data;
	u32 new_rx_count, new_tx_count;
	struct idpf_vport *vport;
	int i, err = 0;
	u16 idx;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
			   ring->tx_pending,
			   IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
			   ring->rx_pending,
			   IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count &&
	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "setting TCP data split is not supported");
		err = -EOPNOTSUPP;

		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
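
/* Usage sketch (not driver code): descriptor counts are changed with, e.g.:
 *	ethtool -G <iface> rx 1024 tx 512
 * Values are rounded up to the required multiples as logged above.
 */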

/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
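
/* For illustration (derived from the macro above, not extra driver code):
 * IDPF_STAT(struct idpf_tx_queue, "pkts", q_stats.packets) expands to
 *	{ .stat_string = "pkts",
 *	  .sizeof_stat = sizeof_field(struct idpf_tx_queue, q_stats.packets),
 *	  .stat_offset = offsetof(struct idpf_tx_queue, q_stats.packets) }
 */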

/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport, _name, _stat)

static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}
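
/* For illustration (derived from the format string above): queue 0 of the Tx
 * stats array yields strings such as "tx_q-0_pkts" and "tx_q-0_bytes" in
 * ethtool -S output.
 */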

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
				  const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_puts(p, stats[i].stat_string);
}

/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	unsigned int i;

	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
			      IDPF_PORT_STATS_LEN);

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* It's critical that we always report a constant number of strings and
	 * that the strings are reported in the same order regardless of how
	 * many queues are actually in use.
	 */
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);
}

/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		idpf_get_stat_strings(netdev, data);
		break;
	default:
		break;
	}
}

/**
 * idpf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports size of various string tables.
 */
static int idpf_get_sset_count(struct net_device *netdev, int sset)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	u16 max_txq, max_rxq;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	vport_config = np->adapter->vport_config[np->vport_idx];
	/* This size reported back here *must* be constant throughout the
	 * lifecycle of the netdevice, i.e. we must report the maximum length
	 * even for queues that don't technically exist.  This is due to the
	 * fact that this userspace API uses three separate ioctl calls to get
	 * stats data but has no way to communicate back to userspace when that
	 * size has changed, which can typically happen as a result of changing
	 * number of queues. If the number/order of stats change in the middle
	 * of this call chain it will lead to userspace crashing/accessing bad
	 * data through buffer under/overflow.
	 */
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
}

/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zeroed.
 */
static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zeroed for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 * @type: type of the queue
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 * zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, const void *q,
				 enum virtchnl2_queue_type type)
{
	const struct u64_stats_sync *stats_sync;
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		size = IDPF_RX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_rx_queue_stats;
		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
	} else {
		size = IDPF_TX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_tx_queue_stats;
		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds zeros to the data buffer since we
 * don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = IDPF_RX_QUEUE_STATS_LEN;
	else
		stats_len = IDPF_TX_QUEUE_STATS_LEN;

	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;
	*data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}

/**
 * idpf_collect_queue_stats - accumulate various per queue stats
 * into port level stats
 * @vport: pointer to vport struct
 */
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
	struct idpf_port_stats *pstats = &vport->port_stats;
	int i, j;

	/* zero out port stats since they're actually tracked in per
	 * queue stats; this is only for reporting
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
			struct idpf_rx_queue_stats *stats;
			struct idpf_rx_queue *rxq;
			unsigned int start;

			if (idpf_is_queue_model_split(vport->rxq_model))
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];

			if (!rxq)
				continue;

			do {
				start = u64_stats_fetch_begin(&rxq->stats_sync);

				stats = &rxq->q_stats;
				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
				hsplit = u64_stats_read(&stats->hsplit_pkts);
				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
				bad_descs = u64_stats_read(&stats->bad_descs);
			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
			u64_stats_add(&pstats->rx_hsplit, hsplit);
			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++) {
			u64 linearize, qbusy, skb_drops, dma_map_errs;
			struct idpf_tx_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			unsigned int start;

			if (!txq)
				continue;

			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				stats = &txq->q_stats;
				linearize = u64_stats_read(&stats->linearize);
				qbusy = u64_stats_read(&stats->q_busy);
				skb_drops = u64_stats_read(&stats->skb_drops);
				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
			} while (u64_stats_fetch_retry(&txq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->tx_linearize, linearize);
			u64_stats_add(&pstats->tx_busy, qbusy);
			u64_stats_add(&pstats->tx_drops, skb_drops);
			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}
}

/**
 * idpf_get_ethtool_stats - report device statistics
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64.
 */
static void idpf_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats __always_unused *stats,
				   u64 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct idpf_vport *vport;
	unsigned int total = 0;
	unsigned int i, j;
	bool is_splitq;
	u16 qtype;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!test_bit(IDPF_VPORT_UP, np->state)) {
		idpf_vport_ctrl_unlock(netdev);

		return;
	}

	rcu_read_lock();

	idpf_collect_queue_stats(vport);
	idpf_add_port_stats(vport, &data);

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		qtype = VIRTCHNL2_QUEUE_TYPE_TX;

		for (j = 0; j < txq_grp->num_txq; j++, total++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];

			if (!txq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, txq, qtype);
		}
	}

	vport_config = vport->adapter->vport_config[vport->idx];
	/* It is critical we provide a constant number of stats back to
	 * userspace regardless of how many queues are actually in use because
	 * there is no way to inform userspace the size has changed between
	 * ioctl calls. This will fill in any missing stats with zero.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
	total = 0;

	is_splitq = idpf_is_queue_model_split(vport->rxq_model);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		qtype = VIRTCHNL2_QUEUE_TYPE_RX;

		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++, total++) {
			struct idpf_rx_queue *rxq;

			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, rxq, qtype);
		}
	}

	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

	rcu_read_unlock();

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_find_rxq_vec - find rxq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * Return: pointer to RX queue vector
 */
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
					u32 q_num)
{
	int q_grp, q_idx;

	if (!idpf_is_queue_model_split(vport->rxq_model))
		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}

/**
 * idpf_find_txq_vec - find txq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * Return: pointer to TX queue vector
 */
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
					u32 q_num)
{
	int q_grp;

	if (!idpf_is_queue_model_split(vport->txq_model))
		return vport->txqs[q_num]->q_vector;

	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

	return vport->txq_grps[q_grp].complq->q_vector;
}

/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_vector: queue vector corresponding to this queue
 * @type: queue type
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
				  const struct idpf_q_vector *q_vector,
				  enum virtchnl2_queue_type type)
{
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
	}
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @q_num: queue number/index to retrieve ITR/INTRL (coalesce) settings for
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	const struct idpf_netdev_priv *np = netdev_priv(netdev);
	const struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_RX);

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_TX);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q_coal: per queue coalesce settings
 * @qv: queue vector for which ITR values have to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
				 struct idpf_q_coalesce *q_coal,
				 struct idpf_q_vector *qv, bool is_rxq)
{
	u32 use_adaptive_coalesce, coalesce_usecs;
	bool is_dim_ena = false;
	u16 itr_val;

	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}
	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");

		return -EINVAL;
	}

	if (is_dim_ena && use_adaptive_coalesce)
		return 0;

	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(qv->vport->netdev,
			   "Invalid value, %d-usecs range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);

		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(qv->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		q_coal->rx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		q_coal->tx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
		}
	}

	/* The switch between static and dynamic ITR takes effect the next
	 * time the interrupt fires
	 */
	return 0;
}
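
/* Usage sketch (not driver code): interrupt coalescing is tuned with, e.g.:
 *	ethtool -C <iface> adaptive-rx off rx-usecs 50
 * Odd microsecond values are rounded down to the even values the hardware
 * supports, as logged above.
 */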

/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that needs updating
 * @q_coal: per queue coalesce settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
			       struct idpf_q_coalesce *q_coal,
			       const struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_q_vector *qv;

	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
		      idpf_find_txq_vec(vport, q_num);

	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
		return -EINVAL;

	return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int i, err = 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto unlock_mutex;

	for (i = 0; i < vport->num_txq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_per_q_coalesce - set ITR values for a single queue
 * @netdev: pointer to the netdev associated with this request
 * @q_num: queue for which the ITR values have to be set
 * @ec: coalesce settings to program the device with
 *
 * Return: 0 on success, and negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
	q_coal = &user_config->q_coalesce[q_num];

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
	if (err) {
		idpf_vport_ctrl_unlock(netdev);

		return err;
	}

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}
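
/* Usage sketch (not part of the driver): moderating only queue 0, with a
 * placeholder interface name:
 *
 *	ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50
 *
 * The handler programs both the TX and RX vector for q_num; a missing
 * queue of either type is skipped inside idpf_set_q_coalesce().
 */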

/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->msg_enable = data;
}
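
/* Usage sketch (not part of the driver): the message level is a bitmap of
 * NETIF_MSG_* flags, e.g. with a placeholder interface name:
 *
 *	ethtool -s eth0 msglvl 0x7	# NETIF_MSG_DRV | PROBE | LINK
 *
 * and plain "ethtool eth0" reports it back as "Current message level".
 */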

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 */
static int idpf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (netif_carrier_ok(netdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = np->link_speed_mbps;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	return 0;
}
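
/* Speed/duplex are only meaningful while the carrier is up, hence the
 * UNKNOWN values otherwise; "ethtool eth0" (placeholder name) surfaces
 * these fields as Speed, Duplex and Auto-negotiation.
 */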

/**
 * idpf_get_timestamp_filters - Get the supported timestamping mode
 * @vport: Virtual port structure
 * @info: ethtool timestamping info structure
 *
 * Get the Tx/Rx timestamp filters.
 */
static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
				       struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

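	/* The RX filters above are reported unconditionally; TX
	 * timestamping is advertised only when the vport has TX timestamp
	 * capabilities and a usable PTP TX access method.
	 */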
	if (!vport->tx_tstamp_caps ||
	    vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
		return;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
				 SOF_TIMESTAMPING_TX_HARDWARE;

	info->tx_types |= BIT(HWTSTAMP_TX_ON);
}

/**
 * idpf_get_ts_info - Get device PHC association
 * @netdev: network interface device structure
 * @info: ethtool timestamping info structure
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *info)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int err = 0;

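	/* Take the vport control lock via trylock: if it is already held,
	 * report -EBUSY back to the ethtool core rather than blocking.
	 */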
	if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
		return -EBUSY;

	vport = idpf_netdev_to_vport(netdev);

	if (!vport->adapter->ptp) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
	    vport->adapter->ptp->clock) {
		info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
		idpf_get_timestamp_filters(vport, info);
	} else {
		pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
		err = ethtool_op_get_ts_info(netdev, info);
	}

unlock:
	mutex_unlock(&np->adapter->vport_ctrl_lock);

	return err;
}
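
/* Usage sketch (not part of the driver): this handler backs
 * "ethtool -T eth0" (placeholder name), which prints the PHC index plus
 * the TX types and RX filters assembled by idpf_get_timestamp_filters().
 */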

/**
 * idpf_get_ts_stats - Collect HW tstamping statistics
 * @netdev: network interface device structure
 * @ts_stats: HW timestamping stats structure
 *
 * Collect HW timestamping statistics: packets timestamped successfully,
 * packets discarded due to illegal values, packets flushed when PTP was
 * released, and packets skipped due to lack of a free index.
 */
static void idpf_get_ts_stats(struct net_device *netdev,
			      struct ethtool_ts_stats *ts_stats)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	unsigned int start;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
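	/* u64_stats begin/retry loop: re-read the snapshot until no writer
	 * updated the counters mid-read, so the three values stay coherent
	 * with each other.
	 */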
	do {
		start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
		ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
		ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
		ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
	} while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));

	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto exit;

	for (u16 i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (u16 j = 0; j < txq_grp->num_txq; j++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			u64 ts;

			if (!txq)
				continue;

			stats = &txq->q_stats;
			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				ts = u64_stats_read(&stats->tstamp_skipped);
			} while (u64_stats_fetch_retry(&txq->stats_sync,
						       start));

			ts_stats->lost += ts;
		}
	}

exit:
	idpf_vport_ctrl_unlock(netdev);
}

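/* Usage sketch (not part of the driver): recent ethtool releases report the
 * timestamping counters collected above with "ethtool -I -T eth0"
 * (--include-statistics; "eth0" is a placeholder interface name).
 *
 * Note on the ops table below: for ETHTOOL_SCOALESCE the core rejects any
 * parameter not listed in supported_coalesce_params before
 * idpf_set_coalesce() runs, so the handlers only need to care about usecs
 * and adaptive settings.
 */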
static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.set_rxnfc		= idpf_set_rxnfc,
	.get_rx_ring_count	= idpf_get_rx_ring_count,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
	.get_ts_info		= idpf_get_ts_info,
	.get_ts_stats		= idpf_get_ts_stats,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &idpf_ethtool_ops;
}
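
/* Wiring note (assumption, the call site lives outside this file): the
 * driver is expected to install these ops during netdev configuration,
 * before register_netdev(); in the upstream idpf this is done from
 * idpf_cfg_netdev().
 */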