xref: /linux/drivers/net/ethernet/intel/ice/ice_repr.c (revision 5832c4a77d6931cebf9ba737129ae8f14b66ee1d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_eswitch.h"
6 #include "ice_devlink.h"
7 #include "ice_sriov.h"
8 #include "ice_tc_lib.h"
9 #include "ice_dcb_lib.h"
10 
11 /**
12  * ice_repr_get_sw_port_id - get port ID associated with representor
13  * @repr: pointer to port representor
14  */
15 static int ice_repr_get_sw_port_id(struct ice_repr *repr)
16 {
17 	return repr->src_vsi->back->hw.port_info->lport;
18 }
19 
20 /**
21  * ice_repr_get_phys_port_name - get phys port name
22  * @netdev: pointer to port representor netdev
23  * @buf: write here port name
24  * @len: max length of buf
25  */
26 static int
27 ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
28 {
29 	struct ice_netdev_priv *np = netdev_priv(netdev);
30 	struct ice_repr *repr = np->repr;
31 	int res;
32 
33 	/* Devlink port is registered and devlink core is taking care of name formatting. */
34 	if (repr->vf->devlink_port.devlink)
35 		return -EOPNOTSUPP;
36 
37 	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
38 		       repr->id);
39 	if (res <= 0)
40 		return -EOPNOTSUPP;
41 	return 0;
42 }
43 
44 /**
45  * ice_repr_inc_tx_stats - increment Tx statistic by one packet
46  * @repr: repr to increment stats on
47  * @len: length of the packet
48  * @xmit_status: value returned by xmit function
49  */
50 void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
51 			   int xmit_status)
52 {
53 	struct ice_repr_pcpu_stats *stats;
54 
55 	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
56 		     xmit_status != NET_XMIT_CN)) {
57 		this_cpu_inc(repr->stats->tx_drops);
58 		return;
59 	}
60 
61 	stats = this_cpu_ptr(repr->stats);
62 	u64_stats_update_begin(&stats->syncp);
63 	stats->tx_packets++;
64 	stats->tx_bytes += len;
65 	u64_stats_update_end(&stats->syncp);
66 }
67 
68 /**
69  * ice_repr_inc_rx_stats - increment Rx statistic by one packet
70  * @netdev: repr netdev to increment stats on
71  * @len: length of the packet
72  */
73 void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
74 {
75 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
76 	struct ice_repr_pcpu_stats *stats;
77 
78 	stats = this_cpu_ptr(repr->stats);
79 	u64_stats_update_begin(&stats->syncp);
80 	stats->rx_packets++;
81 	stats->rx_bytes += len;
82 	u64_stats_update_end(&stats->syncp);
83 }
84 
85 /**
86  * ice_repr_get_stats64 - get VF stats for VFPR use
87  * @netdev: pointer to port representor netdev
88  * @stats: pointer to struct where stats can be stored
89  */
90 static void
91 ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
92 {
93 	struct ice_netdev_priv *np = netdev_priv(netdev);
94 	struct ice_eth_stats *eth_stats;
95 	struct ice_vsi *vsi;
96 
97 	if (ice_is_vf_disabled(np->repr->vf))
98 		return;
99 	vsi = np->repr->src_vsi;
100 
101 	ice_update_vsi_stats(vsi);
102 	eth_stats = &vsi->eth_stats;
103 
104 	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
105 			    eth_stats->tx_multicast;
106 	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
107 			    eth_stats->rx_multicast;
108 	stats->tx_bytes = eth_stats->tx_bytes;
109 	stats->rx_bytes = eth_stats->rx_bytes;
110 	stats->multicast = eth_stats->rx_multicast;
111 	stats->tx_errors = eth_stats->tx_errors;
112 	stats->tx_dropped = eth_stats->tx_discards;
113 	stats->rx_dropped = eth_stats->rx_discards;
114 }
115 
116 /**
117  * ice_netdev_to_repr - Get port representor for given netdevice
118  * @netdev: pointer to port representor netdev
119  */
120 struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
121 {
122 	struct ice_netdev_priv *np = netdev_priv(netdev);
123 
124 	return np->repr;
125 }
126 
127 /**
128  * ice_repr_open - Enable port representor's network interface
129  * @netdev: network interface device structure
130  *
131  * The open entry point is called when a port representor's network
132  * interface is made active by the system (IFF_UP). Corresponding
133  * VF is notified about link status change.
134  *
135  * Returns 0 on success
136  */
137 static int ice_repr_open(struct net_device *netdev)
138 {
139 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
140 	struct ice_vf *vf;
141 
142 	vf = repr->vf;
143 	vf->link_forced = true;
144 	vf->link_up = true;
145 	ice_vc_notify_vf_link_state(vf);
146 
147 	netif_carrier_on(netdev);
148 	netif_tx_start_all_queues(netdev);
149 
150 	return 0;
151 }
152 
153 /**
154  * ice_repr_stop - Disable port representor's network interface
155  * @netdev: network interface device structure
156  *
157  * The stop entry point is called when a port representor's network
158  * interface is de-activated by the system. Corresponding
159  * VF is notified about link status change.
160  *
161  * Returns 0 on success
162  */
163 static int ice_repr_stop(struct net_device *netdev)
164 {
165 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
166 	struct ice_vf *vf;
167 
168 	vf = repr->vf;
169 	vf->link_forced = true;
170 	vf->link_up = false;
171 	ice_vc_notify_vf_link_state(vf);
172 
173 	netif_carrier_off(netdev);
174 	netif_tx_stop_all_queues(netdev);
175 
176 	return 0;
177 }
178 
179 /**
180  * ice_repr_sp_stats64 - get slow path stats for port representor
181  * @dev: network interface device structure
182  * @stats: netlink stats structure
183  */
184 static int
185 ice_repr_sp_stats64(const struct net_device *dev,
186 		    struct rtnl_link_stats64 *stats)
187 {
188 	struct ice_repr *repr = ice_netdev_to_repr(dev);
189 	int i;
190 
191 	for_each_possible_cpu(i) {
192 		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
193 		struct ice_repr_pcpu_stats *repr_stats;
194 		unsigned int start;
195 
196 		repr_stats = per_cpu_ptr(repr->stats, i);
197 		do {
198 			start = u64_stats_fetch_begin(&repr_stats->syncp);
199 			tbytes = repr_stats->tx_bytes;
200 			tpkts = repr_stats->tx_packets;
201 			tdrops = repr_stats->tx_drops;
202 			rbytes = repr_stats->rx_bytes;
203 			rpkts = repr_stats->rx_packets;
204 		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));
205 
206 		stats->tx_bytes += tbytes;
207 		stats->tx_packets += tpkts;
208 		stats->tx_dropped += tdrops;
209 		stats->rx_bytes += rbytes;
210 		stats->rx_packets += rpkts;
211 	}
212 	return 0;
213 }
214 
215 static bool
216 ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
217 {
218 	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
219 }
220 
221 static int
222 ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
223 			       void *sp)
224 {
225 	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
226 		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
227 
228 	return -EINVAL;
229 }
230 
231 static int
232 ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
233 			     struct flow_cls_offload *flower)
234 {
235 	switch (flower->command) {
236 	case FLOW_CLS_REPLACE:
237 		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
238 	case FLOW_CLS_DESTROY:
239 		return ice_del_cls_flower(repr->src_vsi, flower);
240 	default:
241 		return -EINVAL;
242 	}
243 }
244 
245 static int
246 ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
247 			   void *cb_priv)
248 {
249 	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
250 	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
251 
252 	switch (type) {
253 	case TC_SETUP_CLSFLOWER:
254 		return ice_repr_setup_tc_cls_flower(np->repr, flower);
255 	default:
256 		return -EOPNOTSUPP;
257 	}
258 }
259 
260 static LIST_HEAD(ice_repr_block_cb_list);
261 
262 static int
263 ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
264 		  void *type_data)
265 {
266 	struct ice_netdev_priv *np = netdev_priv(netdev);
267 
268 	switch (type) {
269 	case TC_SETUP_BLOCK:
270 		return flow_block_cb_setup_simple((struct flow_block_offload *)
271 						  type_data,
272 						  &ice_repr_block_cb_list,
273 						  ice_repr_setup_tc_block_cb,
274 						  np, np, true);
275 	default:
276 		return -EOPNOTSUPP;
277 	}
278 }
279 
/* Netdev ops shared by all port representor netdevs. The ops pointer
 * also serves as the identity test in ice_is_port_repr_netdev().
 */
static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
290 
291 /**
292  * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
293  * @netdev: pointer to netdev
294  */
295 bool ice_is_port_repr_netdev(const struct net_device *netdev)
296 {
297 	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
298 }
299 
300 /**
301  * ice_repr_reg_netdev - register port representor netdev
302  * @netdev: pointer to port representor netdev
303  */
304 static int
305 ice_repr_reg_netdev(struct net_device *netdev)
306 {
307 	eth_hw_addr_random(netdev);
308 	netdev->netdev_ops = &ice_repr_netdev_ops;
309 	ice_set_ethtool_repr_ops(netdev);
310 
311 	netdev->hw_features |= NETIF_F_HW_TC;
312 
313 	netif_carrier_off(netdev);
314 	netif_tx_stop_all_queues(netdev);
315 
316 	return register_netdev(netdev);
317 }
318 
319 static void ice_repr_remove_node(struct devlink_port *devlink_port)
320 {
321 	devl_lock(devlink_port->devlink);
322 	devl_rate_leaf_destroy(devlink_port);
323 	devl_unlock(devlink_port->devlink);
324 }
325 
/**
 * ice_repr_rem - remove representor from VF
 * @repr: pointer to representor structure
 *
 * Frees the per-CPU stats, the (already unregistered) netdev and @repr
 * itself. @repr must not be dereferenced after this call.
 */
static void ice_repr_rem(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}
336 
/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 *
 * Tears down everything set up by ice_repr_add_vf() in reverse order.
 * ice_repr_rem() frees @repr, so it must stay the last step.
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_repr_remove_node(&repr->vf->devlink_port);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
	ice_repr_rem(repr);
}
349 
/* Export the Tx scheduling topology via devlink-rate. Only done when the
 * eswitch runs in switchdev mode and neither ADQ nor DCB is active.
 */
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	if (!ice_is_switchdev_running(pf))
		return;
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
		return;

	ice_devlink_rate_init_tx_topology(priv_to_devlink(pf),
					  ice_get_main_vsi(pf));
}
362 
/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 *
 * Allocates the representor, its netdev and per-CPU stats and links them
 * together. The netdev is not registered here; the caller does that
 * (see ice_repr_add_vf()).
 *
 * Return: valid pointer on success, ERR_PTR(-ENOMEM) on allocation
 * failure. On error all partially-allocated resources are freed.
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err =  -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	/* representor id mirrors the represented VSI's number */
	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	ether_addr_copy(repr->parent_mac, parent_mac);

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}
407 
/**
 * ice_repr_add_vf - create and register a port representor for a VF
 * @vf: VF to create the representor for
 *
 * Creates the devlink VF port, allocates the representor via
 * ice_repr_add(), registers its netdev and switches the VF's virtchnl
 * ops to representor mode. Errors unwind in reverse order.
 *
 * Return: valid representor pointer on success, ERR_PTR() on failure.
 */
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return ERR_PTR(-ENOENT);

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return ERR_PTR(err);

	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_repr_add;
	}

	repr->vf = vf;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	/* tie the netdev to the PF device and its devlink port before
	 * registration so userspace sees consistent topology
	 */
	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return repr;

err_netdev:
	ice_repr_rem(repr);
err_repr_add:
	ice_devlink_destroy_vf_port(vf);
	return ERR_PTR(err);
}
450 
451 struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
452 {
453 	if (!vsi->vf)
454 		return NULL;
455 
456 	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
457 }
458 
459 /**
460  * ice_repr_start_tx_queues - start Tx queues of port representor
461  * @repr: pointer to repr structure
462  */
463 void ice_repr_start_tx_queues(struct ice_repr *repr)
464 {
465 	netif_carrier_on(repr->netdev);
466 	netif_tx_start_all_queues(repr->netdev);
467 }
468 
469 /**
470  * ice_repr_stop_tx_queues - stop Tx queues of port representor
471  * @repr: pointer to repr structure
472  */
473 void ice_repr_stop_tx_queues(struct ice_repr *repr)
474 {
475 	netif_carrier_off(repr->netdev);
476 	netif_tx_stop_all_queues(repr->netdev);
477 }
478