// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}

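/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are available
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested stats type
 *
 * Only the CPU-hit ("slow path") statistics are supported.
 */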
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

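/**
 * ice_repr_ndo_get_offload_stats - get CPU-hit ("slow path") statistics
 * @attr_id: identifier of the requested stats type
 * @dev: pointer to port representor netdev
 * @sp: pointer to the stats structure to fill, passed as void *
 *
 * Return: 0 on success, -EINVAL for an unsupported @attr_id.
 */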
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

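/**
 * ice_repr_setup_tc_cls_flower - add or delete a flower classifier
 * @repr: port representor the filter was requested on
 * @flower: TC flower offload command
 *
 * Return: 0 on success, negative error code on failure.
 */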
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

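/**
 * ice_repr_setup_tc_block_cb - flow block callback for port representor
 * @type: type of the TC offload request
 * @type_data: offload specific data, a flower request here
 * @cb_priv: ice_netdev_priv of the representor netdev
 *
 * Only TC_SETUP_CLSFLOWER requests are handled.
 */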
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

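/**
 * ice_repr_setup_tc - ndo_setup_tc callback for port representor
 * @netdev: pointer to port representor netdev
 * @type: type of the TC offload request
 * @type_data: offload specific data
 *
 * Only TC_SETUP_BLOCK is supported; flower filters are then handled by
 * ice_repr_setup_tc_block_cb().
 */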
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

/**
 * ice_repr_destroy - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_destroy(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

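/**
 * ice_repr_rem_vf - unroll VF port representor setup
 * @repr: pointer to representor structure
 *
 * Detach the source VSI from the eswitch, unregister the representor
 * netdev, remove the devlink port and restore the default virtchnl ops
 * on the VF.
 */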
static void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
}

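/**
 * ice_repr_rem_sf - unroll SF port representor setup
 * @repr: pointer to representor structure
 *
 * Unregister the representor netdev and remove the subfunction's
 * devlink port.
 */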
static void ice_repr_rem_sf(struct ice_repr *repr)
{
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_sf_port(repr->sf);
}

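/**
 * ice_repr_set_tx_topology - export Tx scheduling topology to devlink-rate
 * @pf: pointer to PF structure
 */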
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	struct devlink *devlink;

	/* only export if ADQ and DCB are disabled and eswitch is enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_create - add representor for generic VSI
 * @src_vsi: pointer to VSI structure of device to represent
 */
static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

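/**
 * ice_repr_add_vf - set up port representor for a VF
 * @repr: pointer to representor structure
 *
 * Create the devlink port, register the representor netdev, attach the
 * source VSI to the eswitch and switch the VF to the representor
 * virtchnl ops.
 *
 * Return: 0 on success, negative error code on failure.
 */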
static int ice_repr_add_vf(struct ice_repr *repr)
{
	struct ice_vf *vf = repr->vf;
	int err;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
	if (err)
		goto err_cfg_vsi;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return 0;

err_cfg_vsi:
	unregister_netdev(repr->netdev);
err_netdev:
	ice_devlink_destroy_vf_port(vf);
	return err;
}

/**
 * ice_repr_create_vf - add representor for VF VSI
 * @vf: VF to create port representor on
 *
 * Set the correct representor type for the VF and the function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_repr *repr;

	if (!vsi)
		return ERR_PTR(-EINVAL);

	repr = ice_repr_create(vsi);
	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_VF;
	repr->vf = vf;
	repr->ops.add = ice_repr_add_vf;
	repr->ops.rem = ice_repr_rem_vf;

	ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);

	return repr;
}

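/**
 * ice_repr_add_sf - set up port representor for a subfunction
 * @repr: pointer to representor structure
 *
 * Create the devlink port and register the representor netdev.
 *
 * Return: 0 on success, negative error code on failure.
 */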
static int ice_repr_add_sf(struct ice_repr *repr)
{
	struct ice_dynamic_port *sf = repr->sf;
	int err;

	err = ice_devlink_create_sf_port(sf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	return 0;

err_netdev:
	ice_devlink_destroy_sf_port(sf);
	return err;
}

/**
 * ice_repr_create_sf - add representor for SF VSI
 * @sf: SF to create port representor on
 *
 * Set the correct representor type for the SF and the function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = ice_repr_create(sf->vsi);

	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_SF;
	repr->sf = sf;
	repr->ops.add = ice_repr_add_sf;
	repr->ops.rem = ice_repr_rem_sf;

	ether_addr_copy(repr->parent_mac, sf->hw_addr);

	return repr;
}

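/**
 * ice_repr_get - get port representor by id
 * @pf: pointer to PF structure
 * @id: representor id to look up
 *
 * Return: pointer to the port representor stored under @id in the eswitch
 * representor xarray, or NULL if there is none.
 */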
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
	return xa_load(&pf->eswitch.reprs, id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}