1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_eswitch.h"
7 #include "devlink/devlink.h"
8 #include "devlink/port.h"
9 #include "ice_sriov.h"
10 #include "ice_tc_lib.h"
11 #include "ice_dcb_lib.h"
12
13 /**
14 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
15 * @repr: repr to increment stats on
16 * @len: length of the packet
17 * @xmit_status: value returned by xmit function
18 */
ice_repr_inc_tx_stats(struct ice_repr * repr,unsigned int len,int xmit_status)19 void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
20 int xmit_status)
21 {
22 struct ice_repr_pcpu_stats *stats;
23
24 if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
25 xmit_status != NET_XMIT_CN)) {
26 this_cpu_inc(repr->stats->tx_drops);
27 return;
28 }
29
30 stats = this_cpu_ptr(repr->stats);
31 u64_stats_update_begin(&stats->syncp);
32 stats->tx_packets++;
33 stats->tx_bytes += len;
34 u64_stats_update_end(&stats->syncp);
35 }
36
37 /**
38 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
39 * @netdev: repr netdev to increment stats on
40 * @len: length of the packet
41 */
ice_repr_inc_rx_stats(struct net_device * netdev,unsigned int len)42 void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
43 {
44 struct ice_repr *repr = ice_netdev_to_repr(netdev);
45 struct ice_repr_pcpu_stats *stats;
46
47 stats = this_cpu_ptr(repr->stats);
48 u64_stats_update_begin(&stats->syncp);
49 stats->rx_packets++;
50 stats->rx_bytes += len;
51 u64_stats_update_end(&stats->syncp);
52 }
53
54 /**
55 * ice_repr_get_stats64 - get VF stats for VFPR use
56 * @netdev: pointer to port representor netdev
57 * @stats: pointer to struct where stats can be stored
58 */
59 static void
ice_repr_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)60 ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
61 {
62 struct ice_netdev_priv *np = netdev_priv(netdev);
63 struct ice_repr *repr = np->repr;
64 struct ice_eth_stats *eth_stats;
65 struct ice_vsi *vsi;
66
67 if (repr->ops.ready(repr))
68 return;
69 vsi = repr->src_vsi;
70
71 ice_update_eth_stats(vsi);
72 eth_stats = &vsi->eth_stats;
73
74 stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
75 eth_stats->tx_multicast;
76 stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
77 eth_stats->rx_multicast;
78 stats->tx_bytes = eth_stats->tx_bytes;
79 stats->rx_bytes = eth_stats->rx_bytes;
80 stats->multicast = eth_stats->rx_multicast;
81 stats->tx_errors = eth_stats->tx_errors;
82 stats->tx_dropped = eth_stats->tx_discards;
83 stats->rx_dropped = eth_stats->rx_discards;
84 }
85
86 /**
87 * ice_netdev_to_repr - Get port representor for given netdevice
88 * @netdev: pointer to port representor netdev
89 */
ice_netdev_to_repr(const struct net_device * netdev)90 struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
91 {
92 struct ice_netdev_priv *np = netdev_priv(netdev);
93
94 return np->repr;
95 }
96
97 /**
98 * ice_repr_vf_open - Enable port representor's network interface
99 * @netdev: network interface device structure
100 *
101 * The open entry point is called when a port representor's network
102 * interface is made active by the system (IFF_UP). Corresponding
103 * VF is notified about link status change.
104 *
105 * Returns 0 on success
106 */
ice_repr_vf_open(struct net_device * netdev)107 static int ice_repr_vf_open(struct net_device *netdev)
108 {
109 struct ice_repr *repr = ice_netdev_to_repr(netdev);
110 struct ice_vf *vf;
111
112 vf = repr->vf;
113 vf->link_forced = true;
114 vf->link_up = true;
115 ice_vc_notify_vf_link_state(vf);
116
117 netif_carrier_on(netdev);
118 netif_tx_start_all_queues(netdev);
119
120 return 0;
121 }
122
/**
 * ice_repr_sf_open - Enable SF port representor's network interface
 * @netdev: network interface device structure
 *
 * Return: 0 on success
 */
static int ice_repr_sf_open(struct net_device *netdev)
{
	/* no SF-side notification needed; just bring the repr up */
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}
130
131 /**
132 * ice_repr_vf_stop - Disable port representor's network interface
133 * @netdev: network interface device structure
134 *
135 * The stop entry point is called when a port representor's network
136 * interface is de-activated by the system. Corresponding
137 * VF is notified about link status change.
138 *
139 * Returns 0 on success
140 */
ice_repr_vf_stop(struct net_device * netdev)141 static int ice_repr_vf_stop(struct net_device *netdev)
142 {
143 struct ice_repr *repr = ice_netdev_to_repr(netdev);
144 struct ice_vf *vf;
145
146 vf = repr->vf;
147 vf->link_forced = true;
148 vf->link_up = false;
149 ice_vc_notify_vf_link_state(vf);
150
151 netif_carrier_off(netdev);
152 netif_tx_stop_all_queues(netdev);
153
154 return 0;
155 }
156
/**
 * ice_repr_sf_stop - Disable SF port representor's network interface
 * @netdev: network interface device structure
 *
 * Return: 0 on success
 */
static int ice_repr_sf_stop(struct net_device *netdev)
{
	/* no SF-side notification needed; just take the repr down */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}
164
165 /**
166 * ice_repr_sp_stats64 - get slow path stats for port representor
167 * @dev: network interface device structure
168 * @stats: netlink stats structure
169 */
170 static int
ice_repr_sp_stats64(const struct net_device * dev,struct rtnl_link_stats64 * stats)171 ice_repr_sp_stats64(const struct net_device *dev,
172 struct rtnl_link_stats64 *stats)
173 {
174 struct ice_repr *repr = ice_netdev_to_repr(dev);
175 int i;
176
177 for_each_possible_cpu(i) {
178 u64 tbytes, tpkts, tdrops, rbytes, rpkts;
179 struct ice_repr_pcpu_stats *repr_stats;
180 unsigned int start;
181
182 repr_stats = per_cpu_ptr(repr->stats, i);
183 do {
184 start = u64_stats_fetch_begin(&repr_stats->syncp);
185 tbytes = repr_stats->tx_bytes;
186 tpkts = repr_stats->tx_packets;
187 tdrops = repr_stats->tx_drops;
188 rbytes = repr_stats->rx_bytes;
189 rpkts = repr_stats->rx_packets;
190 } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
191
192 stats->tx_bytes += tbytes;
193 stats->tx_packets += tpkts;
194 stats->tx_dropped += tdrops;
195 stats->rx_bytes += rbytes;
196 stats->rx_packets += rpkts;
197 }
198 return 0;
199 }
200
201 static bool
ice_repr_ndo_has_offload_stats(const struct net_device * dev,int attr_id)202 ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
203 {
204 return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
205 }
206
207 static int
ice_repr_ndo_get_offload_stats(int attr_id,const struct net_device * dev,void * sp)208 ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
209 void *sp)
210 {
211 if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
212 return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
213
214 return -EINVAL;
215 }
216
217 static int
ice_repr_setup_tc_cls_flower(struct ice_repr * repr,struct flow_cls_offload * flower)218 ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
219 struct flow_cls_offload *flower)
220 {
221 switch (flower->command) {
222 case FLOW_CLS_REPLACE:
223 return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
224 true);
225 case FLOW_CLS_DESTROY:
226 return ice_del_cls_flower(repr->src_vsi, flower);
227 default:
228 return -EINVAL;
229 }
230 }
231
232 static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)233 ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
234 void *cb_priv)
235 {
236 struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
237 struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
238
239 switch (type) {
240 case TC_SETUP_CLSFLOWER:
241 return ice_repr_setup_tc_cls_flower(np->repr, flower);
242 default:
243 return -EOPNOTSUPP;
244 }
245 }
246
/* list of flow block callbacks shared by all port representors */
static LIST_HEAD(ice_repr_block_cb_list);
248
249 static int
ice_repr_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)250 ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
251 void *type_data)
252 {
253 struct ice_netdev_priv *np = netdev_priv(netdev);
254
255 switch (type) {
256 case TC_SETUP_BLOCK:
257 return flow_block_cb_setup_simple((struct flow_block_offload *)
258 type_data,
259 &ice_repr_block_cb_list,
260 ice_repr_setup_tc_block_cb,
261 np, np, true);
262 default:
263 return -EOPNOTSUPP;
264 }
265 }
266
/* netdev ops for VF port representors; open/stop also force the VF's
 * link state to follow the representor's administrative state
 */
static const struct net_device_ops ice_repr_vf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_vf_open,
	.ndo_stop = ice_repr_vf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
276
/* netdev ops for SF port representors; unlike the VF variant, open/stop
 * only toggle the representor's own carrier and queues
 */
static const struct net_device_ops ice_repr_sf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_sf_open,
	.ndo_stop = ice_repr_sf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
286
287 /**
288 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
289 * @netdev: pointer to netdev
290 */
ice_is_port_repr_netdev(const struct net_device * netdev)291 bool ice_is_port_repr_netdev(const struct net_device *netdev)
292 {
293 return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
294 netdev->netdev_ops == &ice_repr_sf_netdev_ops);
295 }
296
297 /**
298 * ice_repr_reg_netdev - register port representor netdev
299 * @netdev: pointer to port representor netdev
300 * @ops: new ops for netdev
301 */
302 static int
ice_repr_reg_netdev(struct net_device * netdev,const struct net_device_ops * ops)303 ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
304 {
305 eth_hw_addr_random(netdev);
306 netdev->netdev_ops = ops;
307 ice_set_ethtool_repr_ops(netdev);
308
309 netdev->hw_features |= NETIF_F_HW_TC;
310
311 netif_carrier_off(netdev);
312 netif_tx_stop_all_queues(netdev);
313
314 return register_netdev(netdev);
315 }
316
/* ops.ready for VF reprs: 0 when the VF can be configured, non-zero
 * (error from ice_check_vf_ready_for_cfg()) otherwise
 */
static int ice_repr_ready_vf(struct ice_repr *repr)
{
	return ice_check_vf_ready_for_cfg(repr->vf);
}
321
/* ops.ready for SF reprs: 0 when the SF is active, non-zero otherwise */
static int ice_repr_ready_sf(struct ice_repr *repr)
{
	return !repr->sf->active;
}
326
327 /**
328 * ice_repr_destroy - remove representor from VF
329 * @repr: pointer to representor structure
330 */
ice_repr_destroy(struct ice_repr * repr)331 void ice_repr_destroy(struct ice_repr *repr)
332 {
333 free_percpu(repr->stats);
334 free_netdev(repr->netdev);
335 kfree(repr);
336 }
337
/* ops.rem for VF reprs: undo ice_repr_add_vf() in reverse order */
static void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
	/* restore normal (pass) handling of VF-originated LLDP frames */
	ice_pass_vf_tx_lldp(repr->src_vsi, true);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
}
346
/* ops.rem for SF reprs: undo ice_repr_add_sf() in reverse order */
static void ice_repr_rem_sf(struct ice_repr *repr)
{
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_sf_port(repr->sf);
}
352
/* Export the devlink-rate Tx topology, but only when the eswitch is in
 * switchdev mode and neither ADQ nor DCB is active.
 */
static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
{
	if (ice_is_switchdev_running(pf) && !ice_is_adq_active(pf) &&
	    !ice_is_dcb_active(pf))
		ice_devlink_rate_init_tx_topology(devlink,
						  ice_get_main_vsi(pf));
}
362
363 /**
364 * ice_repr_create - add representor for generic VSI
365 * @src_vsi: pointer to VSI structure of device to represent
366 */
ice_repr_create(struct ice_vsi * src_vsi)367 static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
368 {
369 struct ice_netdev_priv *np;
370 struct ice_repr *repr;
371 int err;
372
373 repr = kzalloc_obj(*repr);
374 if (!repr)
375 return ERR_PTR(-ENOMEM);
376
377 repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
378 if (!repr->netdev) {
379 err = -ENOMEM;
380 goto err_alloc;
381 }
382
383 repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
384 if (!repr->stats) {
385 err = -ENOMEM;
386 goto err_stats;
387 }
388
389 repr->src_vsi = src_vsi;
390 repr->id = src_vsi->vsi_num;
391 np = netdev_priv(repr->netdev);
392 np->repr = repr;
393
394 repr->netdev->min_mtu = ETH_MIN_MTU;
395 repr->netdev->max_mtu = ICE_MAX_MTU;
396
397 SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
398
399 return repr;
400
401 err_stats:
402 free_netdev(repr->netdev);
403 err_alloc:
404 kfree(repr);
405 return ERR_PTR(err);
406 }
407
ice_repr_add_vf(struct ice_repr * repr)408 static int ice_repr_add_vf(struct ice_repr *repr)
409 {
410 struct ice_vf *vf = repr->vf;
411 struct devlink *devlink;
412 int err;
413
414 err = ice_devlink_create_vf_port(vf);
415 if (err)
416 return err;
417
418 SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
419 err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
420 if (err)
421 goto err_netdev;
422
423 err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
424 if (err)
425 goto err_drop_lldp;
426
427 err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
428 if (err)
429 goto err_cfg_vsi;
430
431 ice_virtchnl_set_repr_ops(vf);
432
433 devlink = priv_to_devlink(vf->pf);
434 ice_repr_set_tx_topology(vf->pf, devlink);
435
436 return 0;
437
438 err_cfg_vsi:
439 ice_pass_vf_tx_lldp(repr->src_vsi, true);
440 err_drop_lldp:
441 unregister_netdev(repr->netdev);
442 err_netdev:
443 ice_devlink_destroy_vf_port(vf);
444 return err;
445 }
446
447 /**
448 * ice_repr_create_vf - add representor for VF VSI
449 * @vf: VF to create port representor on
450 *
451 * Set correct representor type for VF and functions pointer.
452 *
453 * Return: created port representor on success, error otherwise
454 */
ice_repr_create_vf(struct ice_vf * vf)455 struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
456 {
457 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
458 struct ice_repr *repr;
459
460 if (!vsi)
461 return ERR_PTR(-EINVAL);
462
463 repr = ice_repr_create(vsi);
464 if (IS_ERR(repr))
465 return repr;
466
467 repr->type = ICE_REPR_TYPE_VF;
468 repr->vf = vf;
469 repr->ops.add = ice_repr_add_vf;
470 repr->ops.rem = ice_repr_rem_vf;
471 repr->ops.ready = ice_repr_ready_vf;
472
473 ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
474
475 return repr;
476 }
477
ice_repr_add_sf(struct ice_repr * repr)478 static int ice_repr_add_sf(struct ice_repr *repr)
479 {
480 struct ice_dynamic_port *sf = repr->sf;
481 int err;
482
483 err = ice_devlink_create_sf_port(sf);
484 if (err)
485 return err;
486
487 SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
488 err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
489 if (err)
490 goto err_netdev;
491
492 ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
493
494 return 0;
495
496 err_netdev:
497 ice_devlink_destroy_sf_port(sf);
498 return err;
499 }
500
501 /**
502 * ice_repr_create_sf - add representor for SF VSI
503 * @sf: SF to create port representor on
504 *
505 * Set correct representor type for SF and functions pointer.
506 *
507 * Return: created port representor on success, error otherwise
508 */
ice_repr_create_sf(struct ice_dynamic_port * sf)509 struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
510 {
511 struct ice_repr *repr = ice_repr_create(sf->vsi);
512
513 if (IS_ERR(repr))
514 return repr;
515
516 repr->type = ICE_REPR_TYPE_SF;
517 repr->sf = sf;
518 repr->ops.add = ice_repr_add_sf;
519 repr->ops.rem = ice_repr_rem_sf;
520 repr->ops.ready = ice_repr_ready_sf;
521
522 ether_addr_copy(repr->parent_mac, sf->hw_addr);
523
524 return repr;
525 }
526
/**
 * ice_repr_get - look up a port representor by id
 * @pf: pointer to PF structure
 * @id: representor id to look up
 *
 * Return: representor stored under @id in the eswitch xarray, or NULL
 * if none is stored there.
 */
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
	return xa_load(&pf->eswitch.reprs, id);
}
531
532 /**
533 * ice_repr_start_tx_queues - start Tx queues of port representor
534 * @repr: pointer to repr structure
535 */
ice_repr_start_tx_queues(struct ice_repr * repr)536 void ice_repr_start_tx_queues(struct ice_repr *repr)
537 {
538 netif_carrier_on(repr->netdev);
539 netif_tx_start_all_queues(repr->netdev);
540 }
541
542 /**
543 * ice_repr_stop_tx_queues - stop Tx queues of port representor
544 * @repr: pointer to repr structure
545 */
ice_repr_stop_tx_queues(struct ice_repr * repr)546 void ice_repr_stop_tx_queues(struct ice_repr *repr)
547 {
548 netif_carrier_off(repr->netdev);
549 netif_tx_stop_all_queues(repr->netdev);
550 }
551