xref: /linux/drivers/net/ethernet/intel/ice/ice_repr.c (revision 9410645520e9b820069761f3450ef6661418e279)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (repr->ops.ready(repr))
		return;
	vsi = repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_vf_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_vf_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

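/**
 * ice_repr_sf_open - Enable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * Returns 0 on success
 */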
static int ice_repr_sf_open(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_vf_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. The corresponding VF is
 * notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_vf_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

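/**
 * ice_repr_sf_stop - Disable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * Returns 0 on success
 */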
static int ice_repr_sf_stop(struct net_device *netdev)
{
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}

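/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are available
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested statistic set
 *
 * Return: true only for IFLA_OFFLOAD_XSTATS_CPU_HIT, the only offload
 * statistic set exposed by port representors.
 */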
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

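/**
 * ice_repr_ndo_get_offload_stats - get CPU-hit (slow path) offload stats
 * @attr_id: identifier of the requested statistic set
 * @dev: pointer to port representor netdev
 * @sp: buffer to store the stats in (struct rtnl_link_stats64)
 *
 * Return: 0 on success, -EINVAL for an unsupported @attr_id.
 */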
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

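/**
 * ice_repr_setup_tc_cls_flower - handle TC flower offload on a representor
 * @repr: pointer to port representor structure
 * @flower: flower classifier offload request
 *
 * Return: 0 on success, negative error code otherwise.
 */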
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

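/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representors
 * @type: type of TC offload being set up
 * @type_data: offload-specific data
 * @cb_priv: ice_netdev_priv of the representor netdev
 *
 * Only TC_SETUP_CLSFLOWER is supported.
 *
 * Return: 0 on success, negative error code otherwise.
 */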
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

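/**
 * ice_repr_setup_tc - ndo_setup_tc callback for port representors
 * @netdev: pointer to port representor netdev
 * @type: type of TC offload being set up
 * @type_data: offload-specific data
 *
 * Only TC_SETUP_BLOCK is supported; block callbacks are dispatched to
 * ice_repr_setup_tc_block_cb().
 *
 * Return: 0 on success, negative error code otherwise.
 */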
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_vf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_vf_open,
	.ndo_stop = ice_repr_vf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

static const struct net_device_ops ice_repr_sf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_sf_open,
	.ndo_stop = ice_repr_sf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
			  netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 * @ops: new ops for netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

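/**
 * ice_repr_ready_vf - check if the VF behind the representor is ready
 * @repr: pointer to port representor structure
 *
 * Return: non-zero when ice_check_vf_ready_for_cfg() reports the VF as
 * ready for configuration, zero otherwise.
 */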
static int ice_repr_ready_vf(struct ice_repr *repr)
{
	return !ice_check_vf_ready_for_cfg(repr->vf);
}

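/**
 * ice_repr_ready_sf - check readiness of the SF behind the representor
 * @repr: pointer to port representor structure
 *
 * Return: non-zero when the backing subfunction port is not active, zero
 * when it is active.
 */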
static int ice_repr_ready_sf(struct ice_repr *repr)
{
	return !repr->sf->active;
}

/**
 * ice_repr_destroy - free a port representor and its resources
 * @repr: pointer to representor structure
 */
void ice_repr_destroy(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

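/**
 * ice_repr_rem_vf - unroll VF port representor setup
 * @repr: pointer to port representor structure
 *
 * Undo what ice_repr_add_vf() did: detach the source VSI from the eswitch,
 * unregister the representor netdev, remove the devlink port and restore
 * the default virtchnl ops for the VF.
 */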
static void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
}

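/**
 * ice_repr_rem_sf - unroll SF port representor setup
 * @repr: pointer to port representor structure
 *
 * Unregister the representor netdev and remove the subfunction devlink port.
 */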
static void ice_repr_rem_sf(struct ice_repr *repr)
{
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_sf_port(repr->sf);
}

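/**
 * ice_repr_set_tx_topology - export Tx scheduling topology via devlink-rate
 * @pf: pointer to the PF structure
 * @devlink: devlink instance associated with the PF
 */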
static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
{
	/* only export if ADQ and DCB disabled and eswitch enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_create - add representor for generic VSI
 * @src_vsi: pointer to VSI structure of device to represent
 */
static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

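/**
 * ice_repr_add_vf - hook VF port representor into the eswitch
 * @repr: pointer to port representor structure
 *
 * Create the devlink port for the VF, register the representor netdev,
 * attach the VF VSI to the eswitch, switch the VF to representor virtchnl
 * ops and export the Tx scheduling topology.
 *
 * Return: 0 on success, negative error code otherwise.
 */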
static int ice_repr_add_vf(struct ice_repr *repr)
{
	struct ice_vf *vf = repr->vf;
	struct devlink *devlink;
	int err;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
	if (err)
		goto err_netdev;

	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
	if (err)
		goto err_cfg_vsi;

	ice_virtchnl_set_repr_ops(vf);

	devlink = priv_to_devlink(vf->pf);
	ice_repr_set_tx_topology(vf->pf, devlink);

	return 0;

err_cfg_vsi:
	unregister_netdev(repr->netdev);
err_netdev:
	ice_devlink_destroy_vf_port(vf);
	return err;
}

/**
 * ice_repr_create_vf - add representor for VF VSI
 * @vf: VF to create port representor on
 *
 * Set the correct representor type for the VF and the representor
 * function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_repr *repr;

	if (!vsi)
		return ERR_PTR(-EINVAL);

	repr = ice_repr_create(vsi);
	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_VF;
	repr->vf = vf;
	repr->ops.add = ice_repr_add_vf;
	repr->ops.rem = ice_repr_rem_vf;
	repr->ops.ready = ice_repr_ready_vf;

	ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);

	return repr;
}

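/**
 * ice_repr_add_sf - hook subfunction port representor into the eswitch
 * @repr: pointer to port representor structure
 *
 * Create the devlink port for the SF, register the representor netdev and
 * export the Tx scheduling topology.
 *
 * Return: 0 on success, negative error code otherwise.
 */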
static int ice_repr_add_sf(struct ice_repr *repr)
{
	struct ice_dynamic_port *sf = repr->sf;
	int err;

	err = ice_devlink_create_sf_port(sf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
	if (err)
		goto err_netdev;

	ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));

	return 0;

err_netdev:
	ice_devlink_destroy_sf_port(sf);
	return err;
}

/**
 * ice_repr_create_sf - add representor for SF VSI
 * @sf: SF to create port representor on
 *
 * Set the correct representor type for the SF and the representor
 * function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = ice_repr_create(sf->vsi);

	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_SF;
	repr->sf = sf;
	repr->ops.add = ice_repr_add_sf;
	repr->ops.rem = ice_repr_rem_sf;
	repr->ops.ready = ice_repr_ready_sf;

	ether_addr_copy(repr->parent_mac, sf->hw_addr);

	return repr;
}

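/**
 * ice_repr_get - get port representor by ID
 * @pf: pointer to PF structure
 * @id: representor ID
 *
 * Return: pointer to the port representor stored in the eswitch xarray,
 * or NULL when no representor with @id exists.
 */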
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
	return xa_load(&pf->eswitch.reprs, id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}