xref: /linux/drivers/net/ethernet/intel/ice/ice_eswitch.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_eswitch.h"
7 #include "ice_eswitch_br.h"
8 #include "ice_fltr.h"
9 #include "ice_repr.h"
10 #include "devlink/devlink.h"
11 #include "ice_tc_lib.h"
12 
13 /**
14  * ice_eswitch_setup_env - configure eswitch HW filters
15  * @pf: pointer to PF struct
16  *
17  * This function adds the HW filter configuration specific to switchdev
18  * mode.
19  */
20 static int ice_eswitch_setup_env(struct ice_pf *pf)
21 {
22 	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
23 	struct net_device *netdev = uplink_vsi->netdev;
24 	bool if_running = netif_running(netdev);
25 	struct ice_vsi_vlan_ops *vlan_ops;
26 
27 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
28 		if (ice_down(uplink_vsi))
29 			return -ENODEV;
30 
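	/* Remove existing filters and synced MAC addresses; in switchdev mode
	 * the uplink VSI receives its traffic through the default Rx/Tx rules
	 * configured below rather than through per-MAC filters.
	 */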
31 	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
32 
33 	netif_addr_lock_bh(netdev);
34 	__dev_uc_unsync(netdev, NULL);
35 	__dev_mc_unsync(netdev, NULL);
36 	netif_addr_unlock_bh(netdev);
37 
38 	if (ice_vsi_add_vlan_zero(uplink_vsi))
39 		goto err_vlan_zero;
40 
41 	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
42 			     ICE_FLTR_RX))
43 		goto err_def_rx;
44 
45 	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
46 			     ICE_FLTR_TX))
47 		goto err_def_tx;
48 
49 	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
50 	if (vlan_ops->dis_rx_filtering(uplink_vsi))
51 		goto err_vlan_filtering;
52 
53 	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
54 		goto err_override_uplink;
55 
56 	if (ice_vsi_update_local_lb(uplink_vsi, true))
57 		goto err_override_local_lb;
58 
59 	if (if_running && ice_up(uplink_vsi))
60 		goto err_up;
61 
62 	return 0;
63 
64 err_up:
65 	ice_vsi_update_local_lb(uplink_vsi, false);
66 err_override_local_lb:
67 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
68 err_override_uplink:
69 	vlan_ops->ena_rx_filtering(uplink_vsi);
70 err_vlan_filtering:
71 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
72 			 ICE_FLTR_TX);
73 err_def_tx:
74 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
75 			 ICE_FLTR_RX);
76 err_def_rx:
77 	ice_vsi_del_vlan_zero(uplink_vsi);
78 err_vlan_zero:
79 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
80 				       uplink_vsi->port_info->mac.perm_addr,
81 				       ICE_FWD_TO_VSI);
82 	if (if_running)
83 		ice_up(uplink_vsi);
84 
85 	return -ENODEV;
86 }
87 
88 /**
89  * ice_eswitch_release_repr - clear PR VSI configuration
90  * @pf: pointer to PF struct
91  * @repr: pointer to PR
92  */
93 static void
94 ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
95 {
96 	struct ice_vsi *vsi = repr->src_vsi;
97 
98 	/* Skip representors that aren't configured */
99 	if (!repr->dst)
100 		return;
101 
102 	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
103 	metadata_dst_free(repr->dst);
104 	repr->dst = NULL;
105 	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
106 				       ICE_FWD_TO_VSI);
107 }
108 
109 /**
110  * ice_eswitch_setup_repr - configure PR to run in switchdev mode
111  * @pf: pointer to PF struct
112  * @repr: pointer to PR struct
113  */
114 static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
115 {
116 	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
117 	struct ice_vsi *vsi = repr->src_vsi;
118 	struct metadata_dst *dst;
119 
120 	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
121 	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
122 				       GFP_KERNEL);
123 	if (!repr->dst)
124 		goto err_add_mac_fltr;
125 
126 	if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
127 		goto err_dst_free;
128 
129 	if (ice_vsi_add_vlan_zero(vsi))
130 		goto err_update_security;
131 
132 	netif_keep_dst(uplink_vsi->netdev);
133 
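	/* The metadata dst attached to skbs sent through the port representor
	 * carries the target VSI number and the uplink netdev used for the
	 * actual transmission (see ice_eswitch_port_start_xmit()).
	 */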
134 	dst = repr->dst;
135 	dst->u.port_info.port_id = vsi->vsi_num;
136 	dst->u.port_info.lower_dev = uplink_vsi->netdev;
137 
138 	return 0;
139 
140 err_update_security:
141 	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
142 err_dst_free:
143 	metadata_dst_free(repr->dst);
144 	repr->dst = NULL;
145 err_add_mac_fltr:
146 	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
147 
148 	return -ENODEV;
149 }
150 
151 /**
152  * ice_eswitch_update_repr - reconfigure port representor
153  * @repr_id: representor ID
154  * @vsi: VSI for which port representor is configured
155  */
156 void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
157 {
158 	struct ice_pf *pf = vsi->back;
159 	struct ice_repr *repr;
160 	int ret;
161 
162 	if (!ice_is_switchdev_running(pf))
163 		return;
164 
165 	repr = xa_load(&pf->eswitch.reprs, repr_id);
166 	if (!repr)
167 		return;
168 
169 	repr->src_vsi = vsi;
170 	repr->dst->u.port_info.port_id = vsi->vsi_num;
171 
172 	if (repr->br_port)
173 		repr->br_port->vsi = vsi;
174 
175 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
176 	if (ret) {
177 		ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
178 					       ICE_FWD_TO_VSI);
179 		dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
180 			repr->id);
181 	}
182 }
183 
184 /**
185  * ice_eswitch_port_start_xmit - callback for packet transmission
186  * @skb: send buffer
187  * @netdev: network interface device structure
188  *
189  * Returns NETDEV_TX_OK if sent, else an error code
190  */
191 netdev_tx_t
192 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
193 {
194 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
195 	unsigned int len = skb->len;
196 	int ret;
197 
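	/* Attach the representor's metadata dst so the Tx path can steer the
	 * frame to the correct VSI, then transmit it through the uplink netdev.
	 */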
198 	skb_dst_drop(skb);
199 	dst_hold((struct dst_entry *)repr->dst);
200 	skb_dst_set(skb, (struct dst_entry *)repr->dst);
201 	skb->dev = repr->dst->u.port_info.lower_dev;
202 
203 	ret = dev_queue_xmit(skb);
204 	ice_repr_inc_tx_stats(repr, len, ret);
205 
206 	return ret;
207 }
208 
209 /**
210  * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
211  * @skb: pointer to send buffer
212  * @off: pointer to offload struct
213  */
214 void
215 ice_eswitch_set_target_vsi(struct sk_buff *skb,
216 			   struct ice_tx_offload_params *off)
217 {
218 	struct metadata_dst *dst = skb_metadata_dst(skb);
219 	u64 cd_cmd, dst_vsi;
220 
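	/* Without a metadata dst the frame is not from a port representor;
	 * let the HW switch forward it out the uplink. Otherwise direct it to
	 * the VSI carried in the dst.
	 */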
221 	if (!dst) {
222 		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
223 		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
224 	} else {
225 		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
226 		dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
227 				     dst->u.port_info.port_id);
228 		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
229 	}
230 }
231 
232 /**
233  * ice_eswitch_release_env - clear eswitch HW filters
234  * @pf: pointer to PF struct
235  *
236  * This function removes the HW filter configuration specific to switchdev
237  * mode and restores the default legacy mode settings.
238  */
239 static void ice_eswitch_release_env(struct ice_pf *pf)
240 {
241 	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
242 	struct ice_vsi_vlan_ops *vlan_ops;
243 
244 	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
245 
246 	ice_vsi_update_local_lb(uplink_vsi, false);
247 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
248 	vlan_ops->ena_rx_filtering(uplink_vsi);
249 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
250 			 ICE_FLTR_TX);
251 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
252 			 ICE_FLTR_RX);
253 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
254 				       uplink_vsi->port_info->mac.perm_addr,
255 				       ICE_FWD_TO_VSI);
256 }
257 
258 /**
259  * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
260  * @pf: pointer to PF structure
261  */
262 static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
263 {
264 	struct ice_vsi *uplink_vsi;
265 
266 	uplink_vsi = ice_get_main_vsi(pf);
267 	if (!uplink_vsi)
268 		return -ENODEV;
269 
270 	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
271 		dev_err(ice_pf_to_dev(pf),
272 			"Uplink port cannot be a bridge port\n");
273 		return -EINVAL;
274 	}
275 
276 	pf->eswitch.uplink_vsi = uplink_vsi;
277 
278 	if (ice_eswitch_setup_env(pf))
279 		return -ENODEV;
280 
281 	if (ice_eswitch_br_offloads_init(pf))
282 		goto err_br_offloads;
283 
284 	pf->eswitch.is_running = true;
285 
286 	return 0;
287 
288 err_br_offloads:
289 	ice_eswitch_release_env(pf);
290 	return -ENODEV;
291 }
292 
293 /**
294  * ice_eswitch_disable_switchdev - disable eswitch resources
295  * @pf: pointer to PF structure
296  */
297 static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
298 {
299 	ice_eswitch_br_offloads_deinit(pf);
300 	ice_eswitch_release_env(pf);
301 
302 	pf->eswitch.is_running = false;
303 }
304 
305 /**
306  * ice_eswitch_mode_set - set new eswitch mode
307  * @devlink: pointer to devlink structure
308  * @mode: eswitch mode to switch to
309  * @extack: pointer to extack structure
310  */
311 int
312 ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
313 		     struct netlink_ext_ack *extack)
314 {
315 	struct ice_pf *pf = devlink_priv(devlink);
316 
317 	if (pf->eswitch_mode == mode)
318 		return 0;
319 
320 	if (ice_has_vfs(pf)) {
321 		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
322 		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
323 		return -EOPNOTSUPP;
324 	}
325 
326 	switch (mode) {
327 	case DEVLINK_ESWITCH_MODE_LEGACY:
328 		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
329 			 pf->hw.pf_id);
330 		xa_destroy(&pf->eswitch.reprs);
331 		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
332 		break;
333 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
334 	{
335 		if (ice_is_adq_active(pf)) {
336 			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
337 			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
338 			return -EOPNOTSUPP;
339 		}
340 
341 		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
342 			 pf->hw.pf_id);
343 		xa_init(&pf->eswitch.reprs);
344 		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
345 		break;
346 	}
347 	default:
348 		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
349 		return -EINVAL;
350 	}
351 
352 	pf->eswitch_mode = mode;
353 	return 0;
354 }
355 
356 /**
357  * ice_eswitch_mode_get - get current eswitch mode
358  * @devlink: pointer to devlink structure
359  * @mode: output parameter for current eswitch mode
360  */
361 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
362 {
363 	struct ice_pf *pf = devlink_priv(devlink);
364 
365 	*mode = pf->eswitch_mode;
366 	return 0;
367 }
368 
369 /**
370  * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
371  * @pf: pointer to PF structure
372  *
373  * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
374  * false otherwise.
375  */
376 bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
377 {
378 	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
379 }
380 
381 /**
382  * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
383  * @pf: pointer to PF structure
384  */
385 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
386 {
387 	struct ice_repr *repr;
388 	unsigned long id;
389 
390 	if (test_bit(ICE_DOWN, pf->state))
391 		return;
392 
393 	xa_for_each(&pf->eswitch.reprs, id, repr)
394 		ice_repr_start_tx_queues(repr);
395 }
396 
397 /**
398  * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
399  * @pf: pointer to PF structure
400  */
401 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
402 {
403 	struct ice_repr *repr;
404 	unsigned long id;
405 
406 	if (test_bit(ICE_DOWN, pf->state))
407 		return;
408 
409 	xa_for_each(&pf->eswitch.reprs, id, repr)
410 		ice_repr_stop_tx_queues(repr);
411 }
412 
413 static void ice_eswitch_stop_reprs(struct ice_pf *pf)
414 {
415 	ice_eswitch_stop_all_tx_queues(pf);
416 }
417 
418 static void ice_eswitch_start_reprs(struct ice_pf *pf)
419 {
420 	ice_eswitch_start_all_tx_queues(pf);
421 }
422 
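/**
 * ice_eswitch_attach - attach VF to the eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be attached
 *
 * Create and configure a port representor for the VF. If this is the first
 * representor, bring up the switchdev environment first.
 */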
423 int
424 ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
425 {
426 	struct ice_repr *repr;
427 	int err;
428 
429 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
430 		return 0;
431 
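	/* The first representor being added brings up the whole switchdev
	 * environment.
	 */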
432 	if (xa_empty(&pf->eswitch.reprs)) {
433 		err = ice_eswitch_enable_switchdev(pf);
434 		if (err)
435 			return err;
436 	}
437 
438 	ice_eswitch_stop_reprs(pf);
439 
440 	repr = ice_repr_add_vf(vf);
441 	if (IS_ERR(repr)) {
442 		err = PTR_ERR(repr);
443 		goto err_create_repr;
444 	}
445 
446 	err = ice_eswitch_setup_repr(pf, repr);
447 	if (err)
448 		goto err_setup_repr;
449 
450 	err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
451 	if (err)
452 		goto err_xa_alloc;
453 
454 	vf->repr_id = repr->id;
455 
456 	ice_eswitch_start_reprs(pf);
457 
458 	return 0;
459 
460 err_xa_alloc:
461 	ice_eswitch_release_repr(pf, repr);
462 err_setup_repr:
463 	ice_repr_rem_vf(repr);
464 err_create_repr:
465 	if (xa_empty(&pf->eswitch.reprs))
466 		ice_eswitch_disable_switchdev(pf);
467 	ice_eswitch_start_reprs(pf);
468 
469 	return err;
470 }
471 
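/**
 * ice_eswitch_detach - detach VF from the eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be detached
 *
 * Remove the VF port representor; when the last representor is removed, tear
 * down the switchdev environment and destroy the devlink rate nodes.
 */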
472 void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
473 {
474 	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
475 	struct devlink *devlink = priv_to_devlink(pf);
476 
477 	if (!repr)
478 		return;
479 
480 	ice_eswitch_stop_reprs(pf);
481 	xa_erase(&pf->eswitch.reprs, repr->id);
482 
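	/* Tear down the switchdev environment once the last representor is
	 * gone.
	 */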
483 	if (xa_empty(&pf->eswitch.reprs))
484 		ice_eswitch_disable_switchdev(pf);
485 
486 	ice_eswitch_release_repr(pf, repr);
487 	ice_repr_rem_vf(repr);
488 
489 	if (xa_empty(&pf->eswitch.reprs)) {
490 		/* since all port representors are destroyed, there is
491 		 * no point in keeping the nodes
492 		 */
493 		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
494 		devl_lock(devlink);
495 		devl_rate_nodes_destroy(devlink);
496 		devl_unlock(devlink);
497 	} else {
498 		ice_eswitch_start_reprs(pf);
499 	}
500 }
501 
502 /**
503  * ice_eswitch_rebuild - rebuild eswitch
504  * @pf: pointer to PF structure
505  */
506 void ice_eswitch_rebuild(struct ice_pf *pf)
507 {
508 	struct ice_repr *repr;
509 	unsigned long id;
510 
511 	if (!ice_is_switchdev_running(pf))
512 		return;
513 
514 	xa_for_each(&pf->eswitch.reprs, id, repr)
515 		ice_eswitch_detach(pf, repr->vf);
516 }
517 
518 /**
519  * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
520  * @rx_ring: ring used to receive the packet
521  * @rx_desc: descriptor used to get src_vsi value
522  *
523  * Get the src_vsi value from the descriptor and look up the matching
524  * representor. If none is found, return rx_ring->netdev.
525  */
526 struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
527 					  union ice_32b_rx_flex_desc *rx_desc)
528 {
529 	struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
530 	struct ice_32b_rx_flex_desc_nic_2 *desc;
531 	struct ice_repr *repr;
532 
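	/* The Rx flex descriptor carries the source VSI number; use it to look
	 * up the port representor for that VSI.
	 */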
533 	desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
534 	repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
535 	if (!repr)
536 		return rx_ring->netdev;
537 
538 	return repr->netdev;
539 }
540