xref: /linux/drivers/net/ethernet/intel/ice/ice_repr.c (revision ff9f065318e17a1a97981d9e535fcfc6ce5d5614)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}

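/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are available
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested stats type
 *
 * Only IFLA_OFFLOAD_XSTATS_CPU_HIT stats are supported by the port
 * representor.
 */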
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

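/**
 * ice_repr_ndo_get_offload_stats - get CPU-hit (slow path) offload stats
 * @attr_id: identifier of the requested stats type
 * @dev: pointer to port representor netdev
 * @sp: pointer to stats structure to fill
 *
 * Returns 0 on success, -EINVAL for unsupported @attr_id values.
 */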
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

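/**
 * ice_repr_setup_tc_cls_flower - handle flower classifier offload commands
 * @repr: pointer to port representor structure
 * @flower: pointer to flower offload structure describing the rule
 *
 * Adds or deletes a TC flower filter on the representor's source VSI.
 */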
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

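/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representor
 * @type: type of the offload being set up
 * @type_data: offload-specific data (flow_cls_offload for CLSFLOWER)
 * @cb_priv: netdev private structure of the representor
 *
 * Only TC_SETUP_CLSFLOWER is supported.
 */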
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

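/* TC block callback list shared by all port representors */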
static LIST_HEAD(ice_repr_block_cb_list);

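/**
 * ice_repr_setup_tc - ndo_setup_tc callback for port representor
 * @netdev: pointer to port representor netdev
 * @type: type of the offload being set up
 * @type_data: offload-specific data
 *
 * Only TC_SETUP_BLOCK is supported; block callbacks are registered
 * via flow_block_cb_setup_simple().
 */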
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

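/**
 * ice_repr_remove_node - destroy devlink rate leaf of the representor port
 * @devlink_port: pointer to devlink port of the representor
 */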
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
	devl_rate_leaf_destroy(devlink_port);
}

/**
 * ice_repr_rem - free port representor memory
 * @repr: pointer to representor structure
 */
static void ice_repr_rem(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_repr_remove_node(&repr->vf->devlink_port);
	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
	ice_repr_rem(repr);
}

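/**
 * ice_repr_set_tx_topology - export Tx scheduling topology via devlink-rate
 * @pf: pointer to PF structure
 */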
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	struct devlink *devlink;

	/* only export if ADQ and DCB are disabled and eswitch is enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	ether_addr_copy(repr->parent_mac, parent_mac);

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

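/**
 * ice_repr_add_vf - add port representor for VF
 * @vf: pointer to VF structure
 *
 * Creates the devlink port for the VF, allocates the representor and its
 * netdev, registers the netdev and configures the VF's VSI for the eswitch.
 *
 * Returns pointer to the created representor or an ERR_PTR on failure.
 */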
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return ERR_PTR(-ENOENT);

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return ERR_PTR(err);

	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_repr_add;
	}

	repr->vf = vf;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
	if (err)
		goto err_cfg_vsi;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return repr;

err_cfg_vsi:
	unregister_netdev(repr->netdev);
err_netdev:
	ice_repr_rem(repr);
err_repr_add:
	ice_devlink_destroy_vf_port(vf);
	return ERR_PTR(err);
}

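/**
 * ice_repr_get - get port representor by id
 * @pf: pointer to PF structure
 * @id: representor id
 *
 * Returns pointer to the representor stored in the eswitch xarray,
 * or NULL if it is not found.
 */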
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
	return xa_load(&pf->eswitch.reprs, id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}
447