xref: /linux/drivers/net/ethernet/intel/ice/ice_repr.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_get_sw_port_id - get port ID associated with representor
 * @repr: pointer to port representor
 */
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
	return repr->src_vsi->back->hw.port_info->lport;
}

/**
 * ice_repr_get_phys_port_name - get phys port name
 * @netdev: pointer to port representor netdev
 * @buf: buffer to write the port name into
 * @len: max length of buf
 */
static int
ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	int res;

	/* Devlink port is registered and devlink core is taking care of name formatting. */
	if (repr->vf->devlink_port.devlink)
		return -EOPNOTSUPP;

	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
		       repr->id);
	if (res <= 0)
		return -EOPNOTSUPP;
	return 0;
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link state change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. The corresponding
 * VF is notified about the link state change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 *
 * RX/TX stats are swapped here to be consistent with VF stats. In the slow
 * path, the port representor receives data when the corresponding VF is
 * sending it (and vice versa), so TX and RX bytes/packets are effectively
 * swapped on the port representor.
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	int vf_id = np->repr->vf->vf_id;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u64 pkts, bytes;

	tx_ring = np->vsi->tx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
				     tx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;

	rx_ring = np->vsi->rx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
				     rx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
			    rx_ring->ring_stats->rx_stats.alloc_buf_failed;

	return 0;
}

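/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are supported
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of requested statistic
 *
 * Only IFLA_OFFLOAD_XSTATS_CPU_HIT (slow path) statistics are supported by
 * the port representor.
 */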
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

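/**
 * ice_repr_ndo_get_offload_stats - get offload stats of port representor
 * @attr_id: identifier of requested statistic
 * @dev: pointer to port representor netdev
 * @sp: pointer to a struct rtnl_link_stats64 to fill
 *
 * Returns 0 on success, -EINVAL when @attr_id is not supported.
 */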
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

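/**
 * ice_repr_setup_tc_cls_flower - handle flower classifier offload request
 * @repr: pointer to port representor
 * @flower: pointer to flower offload command
 *
 * Only FLOW_CLS_REPLACE and FLOW_CLS_DESTROY commands are supported.
 */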
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

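/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representor
 * @type: type of TC offload to set up
 * @type_data: offload data, a struct flow_cls_offload for flower
 * @cb_priv: netdev private structure of the port representor
 *
 * Only TC_SETUP_CLSFLOWER is supported.
 */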
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

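/**
 * ice_repr_setup_tc - ndo_setup_tc callback of port representor
 * @netdev: pointer to port representor netdev
 * @type: type of TC setup request
 * @type_data: setup request data
 *
 * Only TC_SETUP_BLOCK is supported; block callbacks are tracked on
 * ice_repr_block_cb_list.
 */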
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

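/**
 * ice_repr_remove_node - destroy devlink rate leaf node of port representor
 * @devlink_port: pointer to devlink port of the representor
 */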
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
	devl_lock(devlink_port->devlink);
	devl_rate_leaf_destroy(devlink_port);
	devl_unlock(devlink_port->devlink);
}

/**
 * ice_repr_rem - free memory allocated for a port representor
 * @repr: pointer to representor structure
 */
static void ice_repr_rem(struct ice_repr *repr)
{
	kfree(repr->q_vector);
	free_netdev(repr->netdev);
	kfree(repr);
}

/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_repr_remove_node(&repr->vf->devlink_port);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
	ice_repr_rem(repr);
}

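/**
 * ice_repr_set_tx_topology - export Tx scheduler topology to devlink-rate
 * @pf: pointer to PF structure
 *
 * The topology is exported only when ADQ and DCB are disabled and
 * switchdev mode is running.
 */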
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	struct devlink *devlink;

	/* only export if ADQ and DCB are disabled and eswitch is enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
	struct ice_q_vector *q_vector;
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->src_vsi = src_vsi;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector) {
		err = -ENOMEM;
		goto err_alloc_q_vector;
	}
	repr->q_vector = q_vector;
	repr->q_id = repr->id;

	ether_addr_copy(repr->parent_mac, parent_mac);

	return repr;

err_alloc_q_vector:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

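/**
 * ice_repr_add_vf - add port representor for VF
 * @vf: pointer to VF structure
 *
 * Create a devlink port for the VF, allocate the representor, register its
 * netdev, switch the VF to representor virtchnl ops and update the Tx
 * topology. Returns the representor on success, ERR_PTR on failure.
 */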
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return ERR_PTR(-ENOENT);

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return ERR_PTR(err);

	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_repr_add;
	}

	repr->vf = vf;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return repr;

err_netdev:
	ice_repr_rem(repr);
err_repr_add:
	ice_devlink_destroy_vf_port(vf);
	return ERR_PTR(err);
}

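/**
 * ice_repr_get_by_vsi - look up port representor associated with a VSI
 * @vsi: pointer to VSI
 *
 * Returns NULL when the VSI is not backed by a VF.
 */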
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
	if (!vsi->vf)
		return NULL;

	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}

/**
 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
 * @repr: repr on which the VSI will be set
 * @vsi: pointer to VSI that will be used by port representor to pass traffic
 */
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(repr->netdev);

	np->vsi = vsi;
}
454