/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
#include "mpesw.h"

enum {
	MLX5_LAG_EGRESS_PORT_1 = 1,
	MLX5_LAG_EGRESS_PORT_2,
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);

static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
		return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT;

	if (mode == MLX5_LAG_MODE_MPESW)
		return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW;

	return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
}

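/* Build a bitmask of the ports that can currently carry traffic (TX
 * enabled and link up) according to the bond tracker, one bit per
 * 0-based port index, for the firmware active_port field.
 */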
static u8 lag_active_port_bits(struct mlx5_lag *ldev)
{
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	u8 active_port = 0;
	int num_enabled;
	int idx;

	mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
			      &num_enabled);
	for (idx = 0; idx < num_enabled; idx++)
		active_port |= BIT_MASK(enabled_ports[idx]);

	return active_port;
}

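/* Issue the CREATE_LAG firmware command. The lag context depends on the
 * port selection mode: queue affinity programs the per-port TX remap,
 * while the hash-based (port selection flow table) mode programs the
 * active-port bitmap, and only when the device can bypass the port
 * selection flow table.
 */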
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
			       unsigned long flags)
{
	bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
				     &flags);
	int port_sel_mode = get_port_sel_mode(mode, flags);
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);

	switch (port_sel_mode) {
	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
		break;
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
		if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
			break;

		MLX5_SET(lagc, lag_ctx, active_port,
			 lag_active_port_bits(mlx5_lag_dev(dev)));
		break;
	default:
		break;
	}
	MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);

	return mlx5_cmd_exec_in(dev, create_lag, in);
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
			       u8 *ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

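/* Fill @ports with the 0-based indices of ports that cannot carry
 * traffic (TX disabled or link down) according to the bond tracker,
 * and return the count in @num_disabled.
 */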
static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
				   u8 *ports, int *num_disabled)
{
	int i;

	*num_disabled = 0;
	for (i = 0; i < num_ports; i++) {
		if (!tracker->netdev_state[i].tx_enabled ||
		    !tracker->netdev_state[i].link_up)
			ports[(*num_disabled)++] = i;
	}
}

void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled)
{
	int i;

	*num_enabled = 0;
	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			ports[(*num_enabled)++] = i;
	}

	if (*num_enabled == 0)
		mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
}

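/* Log the current port mapping: in hash-based mode, the list of active
 * ports; otherwise, the full slot-to-port map. The scnprintf() return
 * value checks guard against truncated entries.
 */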
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
				   struct mlx5_lag *ldev,
				   struct lag_tracker *tracker,
				   unsigned long flags)
{
	char buf[MLX5_MAX_PORTS * 10 + 1] = {};
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	int written = 0;
	int num_enabled;
	int idx;
	int err;
	int i;
	int j;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
		mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
				      &num_enabled);
		for (i = 0; i < num_enabled; i++) {
			err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
			if (err != 3)
				return;
			written += err;
		}
		buf[written - 2] = 0;
		mlx5_core_info(dev, "lag map active ports: %s\n", buf);
	} else {
		for (i = 0; i < ldev->ports; i++) {
			for (j = 0; j < ldev->buckets; j++) {
				idx = i * ldev->buckets + j;
				err = scnprintf(buf + written, 10,
						" port %d:%d", i + 1, ldev->v2p_map[idx]);
				if (err != 9)
					return;
				written += err;
			}
		}
		mlx5_core_info(dev, "lag map:%s\n", buf);
	}
}

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);

static void mlx5_ldev_free(struct kref *ref)
{
	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

	if (ldev->nb.notifier_call)
		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
	mlx5_lag_mp_cleanup(ldev);
	cancel_delayed_work_sync(&ldev->bond_work);
	destroy_workqueue(ldev->wq);
	mutex_destroy(&ldev->lock);
	kfree(ldev);
}

static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
	kref_put(&ldev->ref, mlx5_ldev_free);
}

static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
	kref_get(&ldev->ref);
}

static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int err;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	kref_init(&ldev->ref);
	mutex_init(&ldev->lock);
	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	ldev->nb.notifier_call = mlx5_lag_netdev_event;
	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
		ldev->nb.notifier_call = NULL;
		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
	}
	ldev->mode = MLX5_LAG_MODE_NONE;

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
	ldev->buckets = 1;

	return ldev;
}

int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -ENOENT;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_ROCE;
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_SRIOV;
}

/* Create a mapping between steering slots and active ports.
 * As we have ldev->buckets slots per port, first assume the native
 * mapping should be used.
 * If there are disabled ports, fill their slots with a mapping that
 * points to active ports instead.
 */
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 num_ports,
					   u8 buckets,
					   u8 *ports)
{
	int disabled[MLX5_MAX_PORTS] = {};
	int enabled[MLX5_MAX_PORTS] = {};
	int disabled_ports_num = 0;
	int enabled_ports_num = 0;
	int idx;
	u32 rand;
	int i;
	int j;

	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			enabled[enabled_ports_num++] = i;
		else
			disabled[disabled_ports_num++] = i;
	}

	/* Use native mapping by default where each port's buckets
	 * point to the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
	 */
	for (i = 0; i < num_ports; i++)
		for (j = 0; j < buckets; j++) {
			idx = i * buckets + j;
			ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
		}

	/* If all ports are disabled/enabled keep native mapping */
	if (enabled_ports_num == num_ports ||
	    disabled_ports_num == num_ports)
		return;

	/* Go over the disabled ports and for each assign a random active port */
	for (i = 0; i < disabled_ports_num; i++) {
		for (j = 0; j < buckets; j++) {
			get_random_bytes(&rand, 4);
			ports[disabled[i] * buckets + j] = enabled[rand % enabled_ports_num] + 1;
		}
	}
}

static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].has_drop)
			return true;
	return false;
}

static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].has_drop)
			continue;

		mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
							     MLX5_VPORT_UPLINK);
		ldev->pf[i].has_drop = false;
	}
}

static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
				     struct lag_tracker *tracker)
{
	u8 disabled_ports[MLX5_MAX_PORTS] = {};
	struct mlx5_core_dev *dev;
	int disabled_index;
	int num_disabled;
	int err;
	int i;

	/* First delete the current drop rule so there won't be any dropped
	 * packets
	 */
	mlx5_lag_drop_rule_cleanup(ldev);

	if (!ldev->tracker.has_inactive)
		return;

	mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);

	for (i = 0; i < num_disabled; i++) {
		disabled_index = disabled_ports[i];
		dev = ldev->pf[disabled_index].dev;
		err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch,
								  MLX5_VPORT_UPLINK);
		if (!err)
			ldev->pf[disabled_index].has_drop = true;
		else
			mlx5_core_err(dev,
				      "Failed to create lag drop rule, error: %d", err);
	}
}

static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x2);

	MLX5_SET(lagc, lag_ctx, active_port, ports);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

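/* Apply a new slot-to-port mapping. In hash-based mode this updates the
 * port selection flow table and, when the device can bypass that table,
 * also refreshes the firmware active-port bitmap; otherwise it issues a
 * MODIFY_LAG of the queue-affinity TX remap.
 */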
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 active_ports;
	int ret;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
		ret = mlx5_lag_port_sel_modify(ldev, ports);
		if (ret ||
		    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
			return ret;

		active_ports = lag_active_port_bits(ldev);

		return mlx5_cmd_modify_active_port(dev0, active_ports);
	}
	return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}

void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int idx;
	int err;
	int i;
	int j;

	mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			if (ports[idx] == ldev->v2p_map[idx])
				continue;
			err = _mlx5_modify_lag(ldev, ports);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
				return;
			}
			memcpy(ldev->v2p_map, ports, sizeof(ports));

			mlx5_lag_print_mapping(dev0, ldev, tracker,
					       ldev->mode_flags);
			break;
		}
	}

	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !(ldev->mode == MLX5_LAG_MODE_ROCE))
		mlx5_lag_drop_rule_setup(ldev, tracker);
}

static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
					   unsigned long *flags)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;

	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
		if (ldev->ports > 2)
			return -EINVAL;
		return 0;
	}

	if (ldev->ports > 2)
		ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;

	set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);

	return 0;
}

static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
						struct lag_tracker *tracker,
						enum mlx5_lag_mode mode,
						unsigned long *flags)
{
	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];

	if (mode == MLX5_LAG_MODE_MPESW)
		return;

	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
		if (ldev->ports > 2)
			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
	}
}

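/* Translate the requested LAG mode and bond state into mode_flags:
 * shared FDB and MPESW force native FDB selection mode, RoCE LAG picks
 * its port selection mode by device capability, and offloads LAG opts
 * into hash-based selection only for hash-type bonds.
 */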
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
			      struct lag_tracker *tracker, bool shared_fdb,
			      unsigned long *flags)
{
	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;

	*flags = 0;
	if (shared_fdb) {
		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
	}

	if (mode == MLX5_LAG_MODE_MPESW)
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);

	if (roce_lag)
		return mlx5_lag_set_port_sel_mode_roce(ldev, flags);

	mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
	return 0;
}

char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	int port_sel_mode = get_port_sel_mode(mode, flags);

	switch (port_sel_mode) {
	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash";
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw";
	default: return "invalid";
	}
}

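/* Chain every secondary eswitch to the primary one so all ports share a
 * single FDB. On failure, unwind the eswitches already added, in
 * reverse order.
 */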
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	int err;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;

		err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
							       slave_esw, ldev->ports);
		if (err)
			goto err;
	}
	return 0;
err:
	for (; i > MLX5_LAG_P1; i--)
		mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
							 ldev->pf[i].dev->priv.eswitch);
	return err;
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
			   unsigned long flags)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

	if (tracker)
		mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
	mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
		       shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
	if (err) {
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
		return err;
	}

	if (shared_fdb) {
		err = mlx5_lag_create_single_fdb(ldev);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
			mlx5_core_info(dev0, "Operation mode is single FDB\n");
	}

	if (err) {
		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
	}

	return err;
}

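/* Activate LAG in the requested mode: compute the TX affinity mapping
 * (except for MPESW, which has no bond state to infer from), create the
 * port selection flow table when hash-based selection was chosen, and
 * issue CREATE_LAG. ldev->mode and ldev->mode_flags are committed only
 * on full success.
 */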
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb)
{
	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	unsigned long flags = 0;
	int err;

	err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
	if (err)
		return err;

	if (mode != MLX5_LAG_MODE_MPESW) {
		mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
			err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
						       ldev->v2p_map);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to create LAG port selection(%d)\n",
					      err);
				return err;
			}
		}
	}

	err = mlx5_create_lag(ldev, tracker, mode, flags);
	if (err) {
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
			mlx5_lag_port_sel_destroy(ldev);
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !roce_lag)
		mlx5_lag_drop_rule_setup(ldev, tracker);

	ldev->mode = mode;
	ldev->mode_flags = flags;
	return 0;
}

int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	unsigned long flags = ldev->mode_flags;
	int err;
	int i;

	ldev->mode = MLX5_LAG_MODE_NONE;
	ldev->mode_flags = 0;
	mlx5_lag_mp_reset(ldev);

	if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
		for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
			mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
								 ldev->pf[i].dev->priv.eswitch);
		clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	}

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
		return err;
	}

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
		mlx5_lag_port_sel_destroy(ldev);
		ldev->buckets = 1;
	}
	if (mlx5_lag_has_drop_rule(ldev))
		mlx5_lag_drop_rule_cleanup(ldev);

	return 0;
}

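/* LAG can only be offloaded when every port has a device bound, all
 * eswitches are in the same mode (switchdev mode required wherever VFs
 * exist), and the RoCE state is consistent across all ports.
 */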
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_core_dev *dev;
	u8 mode;
#endif
	bool roce_support;
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (!ldev->pf[i].dev)
			return false;

#ifdef CONFIG_MLX5_ESWITCH
	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
			return false;
	}

	dev = ldev->pf[MLX5_LAG_P1].dev;
	mode = mlx5_eswitch_mode(dev);
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
			return false;

#else
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
			return false;
#endif
	roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
	for (i = 1; i < ldev->ports; i++)
		if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
			return false;

	return true;
}

void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

void mlx5_disable_lag(struct mlx5_lag *ldev)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	bool roce_lag;
	int err;
	int i;

	roce_lag = __mlx5_lag_is_roce(ldev);

	if (shared_fdb) {
		mlx5_lag_remove_devices(ldev);
	} else if (roce_lag) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
		}
		for (i = 1; i < ldev->ports; i++)
			mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
	}

	err = mlx5_deactivate_lag(ldev);
	if (err)
		return;

	if (shared_fdb || roce_lag)
		mlx5_lag_add_devices(ldev);

	if (shared_fdb)
		for (i = 0; i < ldev->ports; i++)
			if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
				mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}

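/* Shared FDB is supported only when every secondary port is in
 * switchdev mode with vport match metadata, native FDB selection and
 * root_ft_on_other_esw capabilities, and the primary additionally has a
 * ready offloads devcom and shared ingress ACL support; all ports must
 * see num_lag_ports - 1 devcom peers.
 */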
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		if (is_mdev_switchdev_mode(dev) &&
		    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
		    MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
		    MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
		    mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
		    MLX5_CAP_GEN(dev, num_lag_ports) - 1)
			continue;
		return false;
	}

	dev = ldev->pf[MLX5_LAG_P1].dev;
	if (is_mdev_switchdev_mode(dev) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
	    mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
		return true;

	return false;
}

static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
{
	bool roce_lag = true;
	int i;

	for (i = 0; i < ldev->ports; i++)
		roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);

#ifdef CONFIG_MLX5_ESWITCH
	for (i = 0; i < ldev->ports; i++)
		roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif

	return roce_lag;
}

static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return do_bond && __mlx5_lag_is_active(ldev) &&
	       ldev->mode != MLX5_LAG_MODE_MPESW;
}

static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return !do_bond && __mlx5_lag_is_active(ldev) &&
	       ldev->mode != MLX5_LAG_MODE_MPESW;
}

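/* Reconcile LAG state with the bond tracker: activate RoCE or SR-IOV
 * LAG (optionally with shared FDB) when a supported bond forms, modify
 * the port mapping when it changes, and tear LAG down when the bond is
 * gone. Runs from the delayed work with the devcom and ldev locks held.
 */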
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct lag_tracker tracker = { };
	bool do_bond, roce_lag;
	int err;
	int i;

	if (!mlx5_lag_is_ready(ldev)) {
		do_bond = false;
	} else {
		/* VF LAG is in multipath mode, ignore bond change requests */
		if (mlx5_lag_is_multipath(dev0))
			return;

		tracker = ldev->tracker;

		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
	}

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		bool shared_fdb = mlx5_shared_fdb_supported(ldev);

		roce_lag = mlx5_lag_is_roce_lag(ldev);

		if (shared_fdb || roce_lag)
			mlx5_lag_remove_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_MODE_ROCE :
						   MLX5_LAG_MODE_SRIOV,
					shared_fdb);
		if (err) {
			if (shared_fdb || roce_lag)
				mlx5_lag_add_devices(ldev);

			return;
		} else if (roce_lag) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			for (i = 1; i < ldev->ports; i++) {
				if (mlx5_get_roce_state(ldev->pf[i].dev))
					mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
			}
		} else if (shared_fdb) {
			int i;

			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);

			for (i = 0; i < ldev->ports; i++) {
				err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
				if (err)
					break;
			}

			if (err) {
				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
				mlx5_rescan_drivers_locked(dev0);
				mlx5_deactivate_lag(ldev);
				mlx5_lag_add_devices(ldev);
				for (i = 0; i < ldev->ports; i++)
					mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
				mlx5_core_err(dev0, "Failed to enable lag\n");
				return;
			}
		}
	} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
		mlx5_disable_lag(ldev);
	}
}

/* The last mdev to unregister will destroy the workqueue before removing the
 * devcom component, and as all the mdevs use the same devcom component we are
 * guaranteed that the devcom is valid while the calling work is running.
 */
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
{
	struct mlx5_devcom_comp_dev *devcom = NULL;
	int i;

	mutex_lock(&ldev->lock);
	for (i = 0; i < ldev->ports; i++) {
		if (ldev->pf[i].dev) {
			devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
			break;
		}
	}
	mutex_unlock(&ldev->lock);
	return devcom;
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	struct mlx5_devcom_comp_dev *devcom;
	int status;

	devcom = mlx5_lag_get_devcom_comp(ldev);
	if (!devcom)
		return;

	status = mlx5_devcom_comp_trylock(devcom);
	if (!status) {
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		mlx5_devcom_comp_unlock(devcom);
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mutex_unlock(&ldev->lock);
	mlx5_devcom_comp_unlock(devcom);
}

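/* Track NETDEV_CHANGEUPPER on a bond master that enslaves our netdevs.
 * Returns 1 when the tracker's bonded state changed (so the bond work
 * should be queued), 0 otherwise.
 */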
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	bool has_inactive = 0;
	struct slave *slave;
	u8 bond_status = 0;
	int num_slaves = 0;
	int changed = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0) {
			slave = bond_slave_get_rcu(ndev_tmp);
			if (slave)
				has_inactive |= bond_is_slave_inactive(slave);
			bond_status |= (1 << idx);
		}

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	tracker->has_inactive = has_inactive;
	/* Determine bonding status:
	 * A device is considered bonded if all of its physical ports are
	 * slaves of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == ldev->ports &&
		bond_status == GENMASK(ldev->ports - 1, 0);

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		changed = 1;
	}

	if (!is_in_lag)
		return changed;

	if (!mlx5_lag_is_ready(ldev))
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
	else if (!mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	return changed;
}

static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx < 0)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
					    struct lag_tracker *tracker,
					    struct net_device *ndev)
{
	struct net_device *ndev_tmp;
	struct slave *slave;
	bool has_inactive = 0;
	int idx;

	if (!netif_is_lag_master(ndev))
		return 0;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx < 0)
			continue;

		slave = bond_slave_get_rcu(ndev_tmp);
		if (slave)
			has_inactive |= bond_is_slave_inactive(slave);
	}
	rcu_read_unlock();

	if (tracker->has_inactive == has_inactive)
		return 0;

	tracker->has_inactive = has_inactive;

	return 1;
}

/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (event != NETDEV_CHANGEUPPER &&
	    event != NETDEV_CHANGELOWERSTATE &&
	    event != NETDEV_CHANGEINFODATA)
		return NOTIFY_DONE;

	ldev = container_of(this, struct mlx5_lag, nb);

	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	case NETDEV_CHANGEINFODATA:
		changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
		break;
	}

	ldev->tracker = tracker;

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{
	unsigned int fn = mlx5_get_dev_index(dev);
	unsigned long flags;

	if (fn >= ldev->ports)
		return;

	spin_lock_irqsave(&lag_lock, flags);
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;
	spin_unlock_irqrestore(&lag_lock, flags);
}

static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
				    struct net_device *netdev)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	for (i = 0; i < ldev->ports; i++) {
		if (ldev->pf[i].netdev == netdev) {
			ldev->pf[i].netdev = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&lag_lock, flags);
}

static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= ldev->ports)
		return;

	ldev->pf[fn].dev = dev;
	dev->priv.lag = ldev;
}

static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
				  struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == ldev->ports)
		return;

	ldev->pf[i].dev = NULL;
	dev->priv.lag = NULL;
}

/* Must be called with HCA devcom component lock held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_comp_dev *pos = NULL;
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos);
	if (tmp_dev)
		ldev = mlx5_lag_dev(tmp_dev);

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc(dev);
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return 0;
		}
		mlx5_ldev_add_mdev(ldev, dev);
		return 0;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		return -EAGAIN;
	}
	mlx5_ldev_get(ldev);
	mlx5_ldev_add_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);

	return 0;
}

void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	/* mdev is being removed, might as well remove debugfs
	 * as early as possible.
	 */
	mlx5_ldev_remove_debugfs(dev->priv.dbg.lag_debugfs);
recheck:
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_remove_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);
	mlx5_ldev_put(ldev);
}

void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
	int err;

	if (!mlx5_lag_is_supported(dev))
		return;

	if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
		return;

recheck:
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	err = __mlx5_lag_dev_add_mdev(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);

	if (err) {
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_add_debugfs(dev);
}

void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	bool lag_is_active;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	mlx5_ldev_remove_netdev(ldev, netdev);
	clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);

	lag_is_active = __mlx5_lag_is_active(ldev);
	mutex_unlock(&ldev->lock);

	if (lag_is_active)
		mlx5_queue_bond_work(ldev, 0);
}

void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
			 struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	mlx5_ldev_add_netdev(ldev, dev, netdev);

	for (i = 0; i < ldev->ports; i++)
		if (!ldev->pf[i].netdev)
			break;

	if (i >= ldev->ports)
		set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
	mutex_unlock(&ldev->lock);
	mlx5_queue_bond_work(ldev, 0);
}

bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_roce(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res = 0;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (ldev)
		res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_mode_is_hash);

bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev) &&
	      dev == ldev->pf[MLX5_LAG_P1].dev;
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_master);

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_sriov(ldev);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);

bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
	spin_unlock_irqrestore(&lag_lock, flags);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);

void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	mutex_lock(&ldev->lock);

	ldev->mode_changes_in_progress++;
	if (__mlx5_lag_is_active(ldev))
		mlx5_disable_lag(ldev);

	mutex_unlock(&ldev->lock);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	ldev->mode_changes_in_progress--;
	mutex_unlock(&ldev->lock);
	mlx5_queue_bond_work(ldev, 0);
}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		for (i = 0; i < ldev->ports; i++)
			if (ldev->tracker.netdev_state[i].tx_enabled)
				ndev = ldev->pf[i].netdev;
		if (!ndev)
			ndev = ldev->pf[ldev->ports - 1].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	struct mlx5_lag *ldev;
	unsigned long flags;
	u8 port = 0;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	for (i = 0; i < ldev->ports; i++) {
		if (ldev->pf[i].netdev == slave) {
			port = i;
			break;
		}
	}

	port = ldev->v2p_map[port * ldev->buckets];

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);
	return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return 0;

	return ldev->ports;
}
EXPORT_SYMBOL(mlx5_lag_get_num_ports);

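/* Iterate over the peer devices of @dev in the lag: *i is the cursor,
 * advanced past the returned entry; NULL is returned once the ports are
 * exhausted. A minimal usage sketch (callers typically wrap this in the
 * mlx5_lag_for_each_peer_mdev() helper macro):
 *
 *	int i = MLX5_LAG_P1;
 *	struct mlx5_core_dev *peer;
 *
 *	while ((peer = mlx5_lag_get_next_peer_mdev(dev, &i)))
 *		handle_peer(peer);	// hypothetical callback
 */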
struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{
	struct mlx5_core_dev *peer_dev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		goto unlock;

	if (*i == ldev->ports)
		goto unlock;
	for (idx = *i; idx < ldev->ports; idx++)
		if (ldev->pf[idx].dev != dev)
			break;

	if (idx == ldev->ports) {
		*i = idx;
		goto unlock;
	}
	*i = idx + 1;

	peer_dev = ldev->pf[idx].dev;

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);
	return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);

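/* Sum the congestion statistics of every port in an active lag (or of
 * @dev alone otherwise) into @values; @offsets gives the byte offset of
 * each requested counter within the query_cong_statistics_out layout.
 */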
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev **mdev;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL);
	if (!mdev) {
		ret = -ENOMEM;
		goto free_out;
	}

	memset(values, 0, sizeof(*values) * num_counters);

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (ldev && __mlx5_lag_is_active(ldev)) {
		num_ports = ldev->ports;
		for (i = 0; i < ldev->ports; i++)
			mdev[i] = ldev->pf[i].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock_irqrestore(&lag_lock, flags);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free_mdev;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free_mdev:
	kvfree(mdev);
free_out:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);