// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

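/* Work item carrying a snapshot of a switchdev FDB event; it is processed on
 * the ordered bridge offloads workqueue so that the atomic switchdev notifier
 * can return without blocking.
 */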
struct mlx5_bridge_switchdev_fdb_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct mlx5_esw_bridge_offloads *br_offloads;
	bool add;
};

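/* Return true if the netdev's mlx5 core device belongs to the given eswitch. */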
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return esw == priv->mdev->priv.eswitch;
}

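/* Return true if the netdev and the eswitch are backed by the same physical
 * hardware (e.g. two PFs of the same NIC in a LAG configuration).
 */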
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev, *esw_mdev;

	mdev = priv->mdev;
	esw_mdev = esw->dev;

	return mlx5_same_hw_devs(mdev, esw_mdev);
}

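/* Find the lower device of a LAG master that is an eswitch representor
 * belonging to the given eswitch.
 */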
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!mlx5e_eswitch_rep(lower))
			continue;

		if (mlx5_esw_bridge_dev_same_esw(lower, esw))
			return lower;
	}

	return NULL;
}

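/* Resolve a netdev (representor, or LAG master over representors) to its
 * vport number and owning eswitch vhca id. Returns the representor netdev on
 * success, NULL if the device is not eligible for bridge offload.
 */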
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
					  u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *priv;

	if (netif_is_lag_master(dev))
		dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

	if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
		return NULL;

	priv = netdev_priv(dev);

	if (!priv->mdev->priv.eswitch->br_offloads)
		return NULL;

	rpriv = priv->ppriv;
	*vport_num = rpriv->rep->vport;
	*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
	return dev;
}

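/* Same as above, but also recurse through lower devices (e.g. a VLAN device
 * stacked on top of a representor) to find an underlying representor.
 */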
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
						u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
		return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
								 esw_owner_vhca_id);

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		struct net_device *rep;

		if (netif_is_bridge_master(lower_dev))
			continue;

		rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
								      esw_owner_vhca_id);
		if (rep)
			return rep;
	}

	return NULL;
}

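/* Return true if the representor is local to this eswitch, i.e. the event
 * should be handled here rather than as a peer event. A LAG bond is only
 * considered local on the LAG master device.
 */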
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
				     struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
		return false;

	priv = netdev_priv(rep);
	mdev = priv->mdev;
	if (netif_is_lag_master(dev))
		return mlx5_lag_is_master(mdev);
	return true;
}

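/* Handle NETDEV_CHANGEUPPER: offload or unoffload the vport when a
 * representor (or a LAG over representors) is linked to or unlinked from a
 * bridge. Vports on the same hardware but a different eswitch are linked as
 * peers instead.
 */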
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    netdev_nb);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev, *rep;
	struct mlx5_eswitch *esw = br_offloads->esw;
	u16 vport_num, esw_owner_vhca_id;
	struct netlink_ext_ack *extack;
	int err = 0;

	if (!netif_is_bridge_master(upper))
		return 0;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (mlx5_esw_bridge_is_local(dev, rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
						   br_offloads, extack) :
			mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
						     br_offloads, extack);
	else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
							br_offloads, extack) :
			mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
							  br_offloads, extack);

	return err;
}

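/* Validate NETDEV_PRECHANGEUPPER for a LAG device being enslaved to a
 * bridge: every representor lower must be in an active LAG with a shared
 * FDB, otherwise the enslavement is rejected.
 */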
static int
mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
		return 0;

	netdev_for_each_lower_dev(dev, lower, iter) {
		struct mlx5_core_dev *mdev;
		struct mlx5e_priv *priv;

		if (!mlx5e_eswitch_rep(lower))
			continue;

		priv = netdev_priv(lower);
		mdev = priv->mdev;
		if (!mlx5_lag_is_active(mdev))
			return -EAGAIN;
		if (!mlx5_lag_is_shared_fdb(mdev))
			return -EOPNOTSUPP;
	}

	return 0;
}

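/* Netdevice notifier: dispatch PRECHANGEUPPER/CHANGEUPPER events. */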
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
						unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
		break;

	case NETDEV_CHANGEUPPER:
		err = mlx5_esw_bridge_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

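/* Offload a switchdev port object (VLAN or MDB entry) added to a bridge port
 * backed by a representor.
 */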
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	const struct switchdev_obj_port_mdb *mdb;
	u16 vport_num, esw_owner_vhca_id;
	int err;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
						    vlan->flags, br_offloads, extack);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
		err = mlx5_esw_bridge_port_mdb_add(dev, vport_num, esw_owner_vhca_id, mdb->addr,
						   mdb->vid, br_offloads, extack);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

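/* Remove the offloaded state of a switchdev port object (VLAN or MDB entry)
 * deleted from a bridge port.
 */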
static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	const struct switchdev_obj_port_mdb *mdb;
	u16 vport_num, esw_owner_vhca_id;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
		mlx5_esw_bridge_port_mdb_del(dev, vport_num, esw_owner_vhca_id, mdb->addr, mdb->vid,
					     br_offloads);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

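/* Apply a switchdev port attribute (bridge port flags, ageing time, VLAN
 * filtering/protocol, multicast state) to the offloaded bridge port.
 */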
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
				  struct switchdev_notifier_port_attr_info *port_attr_info,
				  struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
	const struct switchdev_attr *attr = port_attr_info->attr;
	u16 vport_num, esw_owner_vhca_id;
	int err = 0;

	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
							     &esw_owner_vhca_id))
		return 0;

	port_attr_info->handled = true;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
			NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
			err = -EINVAL;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
						      attr->u.ageing_time, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
							 attr->u.vlan_filtering, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlx5_esw_bridge_vlan_proto_set(vport_num,
						     esw_owner_vhca_id,
						     attr->u.vlan_protocol,
						     br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlx5_esw_bridge_mcast_set(vport_num, esw_owner_vhca_id,
						!attr->u.mc_disabled, br_offloads);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

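/* Blocking switchdev notifier: dispatch port object add/del and attribute
 * set events.
 */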
static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb_blk);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

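/* Release the netdev reference and memory held by an FDB work item. */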
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
	dev_put(fdb_work->dev);
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

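/* Deferred FDB add/remove handler; runs on the bridge offloads workqueue
 * under RTNL.
 */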
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
	struct mlx5_bridge_switchdev_fdb_work *fdb_work =
		container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
	struct switchdev_notifier_fdb_info *fdb_info =
		&fdb_work->fdb_info;
	struct mlx5_esw_bridge_offloads *br_offloads =
		fdb_work->br_offloads;
	struct net_device *dev = fdb_work->dev;
	u16 vport_num, esw_owner_vhca_id;

	rtnl_lock();

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		goto out;

	if (fdb_work->add)
		mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);
	else
		mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);

out:
	rtnl_unlock();
	mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

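/* Allocate and initialize an FDB work item in atomic context. The MAC
 * address is deep-copied because the notifier FDB info is only valid for the
 * duration of the notifier callback.
 */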
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
					struct switchdev_notifier_fdb_info *fdb_info,
					struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_bridge_switchdev_fdb_work *work;
	u8 *addr;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!addr) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}
	ether_addr_copy(addr, fdb_info->addr);
	work->fdb_info.addr = addr;

	dev_hold(dev);
	work->dev = dev;
	work->br_offloads = br_offloads;
	work->add = add;
	return work;
}

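/* Atomic switchdev notifier: handle FDB events. Attribute sets are handled
 * inline; FDB add/del notifications are snapshotted and queued to the
 * ordered workqueue since they cannot be processed in atomic context.
 */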
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlx5_bridge_switchdev_fdb_work *work;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct switchdev_notifier_info *info = ptr;
	u16 vport_num, esw_owner_vhca_id;
	struct net_device *upper, *rep;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

		return notifier_from_errno(err);
	}

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return NOTIFY_DONE;

	if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
						fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* only handle the event on peers */
		if (mlx5_esw_bridge_is_local(dev, rep, esw))
			break;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		/* Mark for deletion to prevent the update wq task from
		 * spuriously refreshing the entry which would mark it again as
		 * offloaded in SW bridge. After this fallthrough to regular
		 * async delete code.
		 */
		mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
						 fdb_info);
		fallthrough;
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
							       event == SWITCHDEV_FDB_ADD_TO_DEVICE,
							       fdb_info,
							       br_offloads);
		if (IS_ERR(work)) {
			WARN_ONCE(1, "Failed to init switchdev work, err=%pe",
				  work);
			return notifier_from_errno(PTR_ERR(work));
		}

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

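/* Periodic work: refresh offloaded FDB entry state under RTNL, then requeue
 * itself every MLX5_ESW_BRIDGE_UPDATE_INTERVAL milliseconds.
 */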
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
								    struct mlx5_esw_bridge_offloads,
								    update_work.work);

	rtnl_lock();
	mlx5_esw_bridge_update(br_offloads);
	rtnl_unlock();

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

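/* Initialize bridge offloads for the representor: set up the offloads
 * context, the ordered workqueue, the three notifiers and the periodic
 * update work. On failure everything is unwound and the device simply
 * operates without bridge offloads.
 */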
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;
	int err;

	rtnl_lock();
	br_offloads = mlx5_esw_bridge_init(esw);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		esw_warn(mdev, "Failed to init esw bridge (err=%pe)\n",
			 br_offloads);
		return;
	}

	br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
	if (!br_offloads->wq) {
		esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->nb);
	if (err) {
		esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
		goto err_register_swdev;
	}

	br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
	if (err) {
		esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
		goto err_register_swdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	if (err) {
		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
			 err);
		goto err_register_netdev;
	}
	INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
	return;

err_register_netdev:
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
	unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}

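/* Tear down bridge offloads in reverse order of initialization. */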
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;

	br_offloads = esw->br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
	unregister_switchdev_notifier(&br_offloads->nb);
	destroy_workqueue(br_offloads->wq);
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}