1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <linux/netdevice.h>
5 #include <net/nexthop.h>
6 #include "lag/lag.h"
7 #include "eswitch.h"
8 #include "esw/acl/ofld.h"
9 #include "lib/events.h"
10
mlx5_mpesw_metadata_cleanup(struct mlx5_lag * ldev)11 static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
12 {
13 struct mlx5_core_dev *dev;
14 struct mlx5_eswitch *esw;
15 u32 pf_metadata;
16 int i;
17
18 mlx5_ldev_for_each(i, 0, ldev) {
19 dev = mlx5_lag_pf(ldev, i)->dev;
20 esw = dev->priv.eswitch;
21 pf_metadata = ldev->lag_mpesw.pf_metadata[i];
22 if (!pf_metadata)
23 continue;
24 mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
25 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
26 (void *)0);
27 mlx5_esw_match_metadata_free(esw, pf_metadata);
28 ldev->lag_mpesw.pf_metadata[i] = 0;
29 }
30 }
31
mlx5_mpesw_metadata_set(struct mlx5_lag * ldev)32 static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
33 {
34 struct mlx5_core_dev *dev;
35 struct mlx5_eswitch *esw;
36 u32 pf_metadata;
37 int i, err;
38
39 mlx5_ldev_for_each(i, 0, ldev) {
40 dev = mlx5_lag_pf(ldev, i)->dev;
41 esw = dev->priv.eswitch;
42 pf_metadata = mlx5_esw_match_metadata_alloc(esw);
43 if (!pf_metadata) {
44 err = -ENOSPC;
45 goto err_metadata;
46 }
47
48 ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
49 err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
50 pf_metadata);
51 if (err)
52 goto err_metadata;
53 }
54
55 mlx5_ldev_for_each(i, 0, ldev) {
56 dev = mlx5_lag_pf(ldev, i)->dev;
57 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
58 (void *)0);
59 }
60
61 return 0;
62
63 err_metadata:
64 mlx5_mpesw_metadata_cleanup(ldev);
65 return err;
66 }
67
/* Transition the LAG into multiport eswitch (MPESW) mode.
 *
 * Ordering matters throughout: per-PF metadata is set first, the member
 * devices are detached before the hardware LAG is activated, and the IB
 * auxiliary device is only re-enabled (and IB reps reloaded) after
 * activation succeeds. Each error label unwinds exactly the steps taken
 * before it, in reverse order.
 *
 * Returns 0 on success, -EINVAL on bad current mode / missing first PF,
 * -EOPNOTSUPP when firmware/eswitch prerequisites aren't met, or the
 * error from a failed setup step.
 */
static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
{
	int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_core_dev *dev0;
	int err;
	int i;

	/* Already in MPESW mode - nothing to do. */
	if (ldev->mode == MLX5_LAG_MODE_MPESW)
		return 0;

	/* Refuse to switch directly out of any other active LAG mode. */
	if (ldev->mode != MLX5_LAG_MODE_NONE)
		return -EINVAL;

	if (idx < 0)
		return -EINVAL;

	/* Capability prerequisites are checked on the first PF; the
	 * LAG-wide prereq/shared-FDB helpers cover the remaining ports.
	 */
	dev0 = mlx5_lag_pf(ldev, idx)->dev;
	if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
	    !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
	    !mlx5_lag_check_prereq(ldev) ||
	    !mlx5_lag_shared_fdb_supported(ldev))
		return -EOPNOTSUPP;

	err = mlx5_mpesw_metadata_set(ldev);
	if (err)
		return err;

	/* Detach member devices before activating the hardware LAG;
	 * they are re-added on the error path below.
	 */
	mlx5_lag_remove_devices(ldev);

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
	if (err) {
		mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto err_add_devices;
	}

	/* Re-enable the IB auxiliary device on the master PF and reload
	 * the IB representors on every port.
	 */
	dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_ldev_for_each(i, 0, ldev) {
		err = mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
		if (err)
			goto err_rescan_drivers;
	}

	mlx5_lag_set_vports_agg_speed(ldev);

	return 0;

err_rescan_drivers:
	/* Re-disable the IB adev and tear the LAG back down. */
	dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_deactivate_lag(ldev);
err_add_devices:
	mlx5_lag_add_devices(ldev);
	mlx5_ldev_for_each(i, 0, ldev)
		mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}
127
mlx5_lag_disable_mpesw(struct mlx5_lag * ldev)128 void mlx5_lag_disable_mpesw(struct mlx5_lag *ldev)
129 {
130 if (ldev->mode == MLX5_LAG_MODE_MPESW) {
131 mlx5_mpesw_metadata_cleanup(ldev);
132 mlx5_disable_lag(ldev);
133 }
134 }
135
mlx5_mpesw_work(struct work_struct * work)136 static void mlx5_mpesw_work(struct work_struct *work)
137 {
138 struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
139 struct mlx5_devcom_comp_dev *devcom;
140 struct mlx5_lag *ldev = mpesww->lag;
141
142 devcom = mlx5_lag_get_devcom_comp(ldev);
143 if (!devcom)
144 return;
145
146 mlx5_devcom_comp_lock(devcom);
147 mutex_lock(&ldev->lock);
148 if (ldev->mode_changes_in_progress) {
149 mpesww->result = -EAGAIN;
150 goto unlock;
151 }
152
153 if (mpesww->op == MLX5_MPESW_OP_ENABLE)
154 mpesww->result = mlx5_lag_enable_mpesw(ldev);
155 else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
156 mlx5_lag_disable_mpesw(ldev);
157 unlock:
158 mutex_unlock(&ldev->lock);
159 mlx5_devcom_comp_unlock(devcom);
160 complete(&mpesww->comp);
161 }
162
mlx5_lag_mpesw_queue_work(struct mlx5_core_dev * dev,enum mpesw_op op)163 static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
164 enum mpesw_op op)
165 {
166 struct mlx5_lag *ldev = mlx5_lag_dev(dev);
167 struct mlx5_mpesw_work_st *work;
168 int err = 0;
169
170 if (!ldev)
171 return 0;
172
173 work = kzalloc_obj(*work);
174 if (!work)
175 return -ENOMEM;
176
177 INIT_WORK(&work->work, mlx5_mpesw_work);
178 init_completion(&work->comp);
179 work->op = op;
180 work->lag = ldev;
181
182 if (!queue_work(ldev->wq, &work->work)) {
183 mlx5_core_warn(dev, "failed to queue mpesw work\n");
184 err = -EINVAL;
185 goto out;
186 }
187 wait_for_completion(&work->comp);
188 err = work->result;
189 out:
190 kfree(work);
191 return err;
192 }
193
/* Synchronously request MPESW teardown for this device via the LAG
 * workqueue; the return value is ignored as the caller has no recovery
 * path for a failed disable.
 */
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
198
/* Synchronously request that this device's LAG enter MPESW mode.
 * Returns 0 on success (or when the device is not in a LAG), negative
 * errno otherwise.
 */
int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
203
mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev * mdev,struct net_device * out_dev,struct netlink_ext_ack * extack)204 int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
205 struct net_device *out_dev,
206 struct netlink_ext_ack *extack)
207 {
208 struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
209
210 if (!netif_is_bond_master(out_dev) || !ldev)
211 return 0;
212
213 if (ldev->mode != MLX5_LAG_MODE_MPESW)
214 return 0;
215
216 NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
217 return -EOPNOTSUPP;
218 }
219
mlx5_lag_is_mpesw(struct mlx5_core_dev * dev)220 bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
221 {
222 struct mlx5_lag *ldev = mlx5_lag_dev(dev);
223
224 return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
225 }
226 EXPORT_SYMBOL(mlx5_lag_is_mpesw);
227
mlx5_mpesw_speed_update_work(struct work_struct * work)228 void mlx5_mpesw_speed_update_work(struct work_struct *work)
229 {
230 struct mlx5_lag *ldev = container_of(work, struct mlx5_lag,
231 speed_update_work);
232
233 mutex_lock(&ldev->lock);
234 if (ldev->mode == MLX5_LAG_MODE_MPESW) {
235 if (ldev->mode_changes_in_progress)
236 queue_work(ldev->wq, &ldev->speed_update_work);
237 else
238 mlx5_lag_set_vports_agg_speed(ldev);
239 }
240
241 mutex_unlock(&ldev->lock);
242 }
243
mlx5_lag_mpesw_port_change_event(struct notifier_block * nb,unsigned long event,void * data)244 int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
245 unsigned long event, void *data)
246 {
247 struct mlx5_nb *mlx5_nb = container_of(nb, struct mlx5_nb, nb);
248 struct lag_func *lag_func = container_of(mlx5_nb,
249 struct lag_func,
250 port_change_nb);
251 struct mlx5_core_dev *dev = lag_func->dev;
252 struct mlx5_lag *ldev = dev->priv.lag;
253 struct mlx5_eqe *eqe = data;
254
255 if (!ldev)
256 return NOTIFY_DONE;
257
258 if (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_DOWN ||
259 eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE)
260 queue_work(ldev->wq, &ldev->speed_update_work);
261
262 return NOTIFY_OK;
263 }
264