xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c (revision 1b294a1f35616977caddaddf3e9d28e576a1adbc)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lib/events.h"

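/* Undo mlx5_mpesw_metadata_set(): for each port that has metadata
 * allocated, clear the uplink vport ingress ACL metadata, raise
 * MLX5_DEV_EVENT_MULTIPORT_ESW so listeners can react to the change,
 * and free the match metadata back to the eswitch allocator.
 */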
static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	struct mlx5_eswitch *esw;
	u32 pf_metadata;
	int i;

	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		esw = dev->priv.eswitch;
		pf_metadata = ldev->lag_mpesw.pf_metadata[i];
		if (!pf_metadata)
			continue;
		mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
					 (void *)0);
		mlx5_esw_match_metadata_free(esw, pf_metadata);
		ldev->lag_mpesw.pf_metadata[i] = 0;
	}
}

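/* Allocate a distinct eswitch match metadata value for each PF and
 * program it on the uplink vport ingress ACL (presumably so packets can
 * be attributed to their ingress port), then raise
 * MLX5_DEV_EVENT_MULTIPORT_ESW on every port. Any partially applied
 * state is unwound through mlx5_mpesw_metadata_cleanup() on failure.
 */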
static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	struct mlx5_eswitch *esw;
	u32 pf_metadata;
	int i, err;

	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		esw = dev->priv.eswitch;
		pf_metadata = mlx5_esw_match_metadata_alloc(esw);
		if (!pf_metadata) {
			err = -ENOSPC;
			goto err_metadata;
		}

		ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
		err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
								 pf_metadata);
		if (err)
			goto err_metadata;
	}

	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
					 (void *)0);
	}

	return 0;

err_metadata:
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}

#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
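/* Switch the LAG into multiport eswitch (MPESW) mode. This requires that
 * no other LAG mode is active, that the port count does not exceed
 * MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS, that the eswitch is in
 * switchdev offloads mode, and that the device reports the
 * port_select_flow_table and create_lag_when_not_master_up capabilities.
 * On any failure after the devices were removed, the original state is
 * restored before returning the error.
 */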
static int enable_mpesw(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int err;
	int i;

	if (ldev->mode != MLX5_LAG_MODE_NONE)
		return -EINVAL;

	if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
		return -EOPNOTSUPP;

	if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
	    !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
	    !mlx5_lag_check_prereq(ldev))
		return -EOPNOTSUPP;

	err = mlx5_mpesw_metadata_set(ldev);
	if (err)
		return err;

	mlx5_lag_remove_devices(ldev);

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
	if (err) {
		mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto err_add_devices;
	}

	dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	for (i = 0; i < ldev->ports; i++) {
		err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
		if (err)
			goto err_rescan_drivers;
	}

	return 0;

err_rescan_drivers:
	dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_deactivate_lag(ldev);
err_add_devices:
	mlx5_lag_add_devices(ldev);
	for (i = 0; i < ldev->ports; i++)
		mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}

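/* Leave MPESW mode: release the per-PF metadata and disable the LAG.
 * A no-op if the LAG is not currently in MPESW mode.
 */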
static void disable_mpesw(struct mlx5_lag *ldev)
{
	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
		mlx5_mpesw_metadata_cleanup(ldev);
		mlx5_disable_lag(ldev);
	}
}

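/* Work handler for queued enable/disable requests. It takes the devcom
 * component lock and the ldev lock, refuses with -EAGAIN while another
 * mode change is in progress, and completes mpesww->comp so the waiter
 * in mlx5_lag_mpesw_queue_work() can pick up the result.
 */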
static void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
	struct mlx5_devcom_comp_dev *devcom;
	struct mlx5_lag *ldev = mpesww->lag;

	devcom = mlx5_lag_get_devcom_comp(ldev);
	if (!devcom)
		return;

	mlx5_devcom_comp_lock(devcom);
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mpesww->result = -EAGAIN;
		goto unlock;
	}

	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
		mpesww->result = enable_mpesw(ldev);
	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
		disable_mpesw(ldev);
unlock:
	mutex_unlock(&ldev->lock);
	mlx5_devcom_comp_unlock(devcom);
	complete(&mpesww->comp);
}

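/* Queue an MPESW enable/disable request and block until it has run.
 * Returns 0 if the device is not part of a LAG, -ENOMEM if the work item
 * cannot be allocated, -EINVAL if it cannot be queued, or otherwise the
 * result recorded by the work handler.
 */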
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
				     enum mpesw_op op)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
	struct mlx5_mpesw_work_st *work;
	int err = 0;

	if (!ldev)
		return 0;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	INIT_WORK(&work->work, mlx5_mpesw_work);
	init_completion(&work->comp);
	work->op = op;
	work->lag = ldev;

	if (!queue_work(ldev->wq, &work->work)) {
		mlx5_core_warn(dev, "failed to queue mpesw work\n");
		err = -EINVAL;
		goto out;
	}
	wait_for_completion(&work->comp);
	err = work->result;
out:
	kfree(work);
	return err;
}

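/* Entry points used by the rest of the driver: both funnel through the
 * serialized work item above. Disable ignores the outcome; enable
 * returns it to the caller.
 */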
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}

int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}

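/* Called on TC mirred offload towards a bond device: forwarding to the
 * bond master is not supported while the LAG is in MPESW mode, so report
 * it via extack and fail with -EOPNOTSUPP.
 */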
int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
			     struct net_device *out_dev,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(mdev);

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	if (ldev->mode != MLX5_LAG_MODE_MPESW)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
	return -EOPNOTSUPP;
}

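/* Exported helper for other modules to query whether the device's LAG is
 * currently running in MPESW mode.
 */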
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);

	return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
EXPORT_SYMBOL(mlx5_lag_is_mpesw);