// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "fs_ft_pool.h"
#include "esw/qos.h"

enum {
        LEGACY_VEPA_PRIO = 0,
        LEGACY_FDB_PRIO,
};

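/* Create the small FDB table used for VEPA mode: two FTEs spread across two
 * autogrouped flow groups, one for the uplink-match rule and one for the
 * catch-all "star" rule installed by _mlx5_eswitch_set_vepa_locked().
 */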
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb;
        int err;

        root_ns = mlx5_get_fdb_sub_ns(dev, 0);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                return -EOPNOTSUPP;
        }

        /* num FTE 2, num FG 2 */
        ft_attr.prio = LEGACY_VEPA_PRIO;
        ft_attr.max_fte = 2;
        ft_attr.autogroup.max_num_groups = 2;
        fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
                return err;
        }
        esw->fdb_table.legacy.vepa_fdb = fdb;

        return 0;
}

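/* Tear down the legacy FDB in reverse creation order: flow groups first,
 * then the table itself.  Pointers are cleared so failed setup paths can
 * call this unconditionally.
 */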
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
        esw_debug(esw->dev, "Destroy FDB Table\n");
        if (!esw->fdb_table.legacy.fdb)
                return;

        if (esw->fdb_table.legacy.promisc_grp)
                mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
        if (esw->fdb_table.legacy.allmulti_grp)
                mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
        if (esw->fdb_table.legacy.addr_grp)
                mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
        mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

        esw->fdb_table.legacy.fdb = NULL;
        esw->fdb_table.legacy.addr_grp = NULL;
        esw->fdb_table.legacy.allmulti_grp = NULL;
        esw->fdb_table.legacy.promisc_grp = NULL;
}

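/* Build the legacy-mode FDB with three flow groups laid out back to back:
 * entries [0, size - 3] do exact DMAC matching for unicast/multicast
 * addresses, entry [size - 2] matches the multicast bit for the allmulti
 * rule, and the last entry [size - 1] matches only on source port for the
 * promiscuous catch-all.
 */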
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb;
        struct mlx5_flow_group *g;
        void *match_criteria;
        int table_size;
        u32 *flow_group_in;
        u8 *dmac;
        int err = 0;

        esw_debug(dev, "Create FDB log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        root_ns = mlx5_get_fdb_sub_ns(dev, 0);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                return -EOPNOTSUPP;
        }

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
        ft_attr.prio = LEGACY_FDB_PRIO;
        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
        }
        esw->fdb_table.legacy.fdb = fdb;
        table_size = fdb->max_fte;

        /* Addresses group : Full match unicast/multicast addresses */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        /* Preserve 2 entries for allmulti and promisc rules */
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.addr_grp = g;

        /* Allmulti group : One rule that forwards any mcast traffic */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
        eth_zero_addr(dmac);
        dmac[0] = 0x01;
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.allmulti_grp = g;

        /* Promiscuous group :
         * One rule that forwards all unmatched traffic from previous groups
         */
        eth_zero_addr(dmac);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
                goto out;
        }
        esw->fdb_table.legacy.promisc_grp = g;

out:
        if (err)
                esw_destroy_legacy_fdb_table(esw);

        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
        esw_debug(esw->dev, "Destroy VEPA Table\n");
        if (!esw->fdb_table.legacy.vepa_fdb)
                return;

        mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
        esw->fdb_table.legacy.vepa_fdb = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
        memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
        atomic64_set(&esw->user_count, 0);

        return esw_create_legacy_fdb_table(esw);
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.legacy.vepa_uplink_rule)
                mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

        if (esw->fdb_table.legacy.vepa_star_rule)
                mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

        esw->fdb_table.legacy.vepa_uplink_rule = NULL;
        esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
        esw_cleanup_vepa_rules(esw);
        esw_destroy_legacy_fdb_table(esw);
        esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
                                        MLX5_VPORT_MC_ADDR_CHANGE | \
                                        MLX5_VPORT_PROMISC_CHANGE)

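/* Bring the eswitch into legacy (SR-IOV) mode: create the legacy FDB,
 * reset every VF link state to AUTO, then enable the PF/VF vports with
 * address-change and promisc-change event handling.
 */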
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
        struct mlx5_vport *vport;
        unsigned long i;
        int ret;

        ret = esw_create_legacy_table(esw);
        if (ret)
                return ret;

        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

        ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
        if (ret)
                esw_destroy_legacy_table(esw);
        return ret;
}

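/* Undo esw_legacy_enable(): disable the vports, drop the multicast promisc
 * uplink rule if one was installed, and destroy the legacy tables.
 */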
void esw_legacy_disable(struct mlx5_eswitch *esw)
{
        struct esw_mc_addr *mc_promisc;

        mlx5_eswitch_disable_pf_vf_vports(esw);

        mc_promisc = &esw->mc_promisc;
        if (mc_promisc->uplink_rule)
                mlx5_del_flow_rules(mc_promisc->uplink_rule);

        esw_destroy_legacy_table(esw);
}

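/* Install or remove the two VEPA rules with esw->state_lock held.  In VEPA
 * mode traffic must hairpin through the external bridge, so packets
 * arriving from the uplink are steered into the regular legacy FDB while
 * everything else is caught by a lower-priority star rule and forced out
 * of the uplink vport.
 */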
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
                                         u8 setting)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        void *misc;

        if (!setting) {
                esw_cleanup_vepa_rules(esw);
                esw_destroy_legacy_vepa_table(esw);
                return 0;
        }

        if (esw->fdb_table.legacy.vepa_uplink_rule)
                return 0;

        err = esw_create_legacy_vepa_table(esw);
        if (err)
                return err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        /* Uplink rule forwards uplink traffic to the FDB */
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = esw->fdb_table.legacy.fdb;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto out;
        }
        esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

        /* Star rule to forward all traffic to the uplink vport */
        memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto out;
        }
        esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
        kvfree(spec);
        if (err) {
                esw_cleanup_vepa_rules(esw);
                esw_destroy_legacy_vepa_table(esw);
        }
        return err;
}

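/* Public entry point for toggling VEPA.  Only valid while the eswitch is
 * in legacy mode with its FDB created; takes state_lock and defers the
 * actual rule programming to _mlx5_eswitch_set_vepa_locked().
 */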
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
        int err = 0;

        if (!esw)
                return -EOPNOTSUPP;

        if (!mlx5_esw_allowed(esw))
                return -EPERM;

        mutex_lock(&esw->state_lock);
        if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
        if (!esw)
                return -EOPNOTSUPP;

        if (!mlx5_esw_allowed(esw))
                return -EPERM;

        if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
                return -EOPNOTSUPP;

        *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
        return 0;
}

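/* Set up the per-vport ingress and egress ACL tables used in legacy mode
 * (spoof checking, VLAN handling, drop counters).  Manager vports are
 * trusted and get no ACLs.
 */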
int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
        int ret;

        /* Only non-manager vports need ACL in legacy mode */
        if (mlx5_esw_is_manager_vport(esw, vport->vport))
                return 0;

        ret = esw_acl_ingress_lgcy_setup(esw, vport);
        if (ret)
                goto ingress_err;

        ret = esw_acl_egress_lgcy_setup(esw, vport);
        if (ret)
                goto egress_err;

        return 0;

egress_err:
        esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
        return ret;
}

void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
        if (mlx5_esw_is_manager_vport(esw, vport->vport))
                return;

        esw_acl_egress_lgcy_cleanup(esw, vport);
        esw_acl_ingress_lgcy_cleanup(esw, vport);
}

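/* Aggregate drop statistics for a vport, reported from the VF's point of
 * view: the egress ACL drop counter accounts for packets the VF would have
 * received (rx_dropped) and the ingress ACL drop counter for packets it
 * tried to send (tx_dropped), plus the firmware discard counters for a
 * downed vport where the capability exists.
 */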
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
                                    struct mlx5_vport *vport,
                                    struct mlx5_vport_drop_stats *stats)
{
        u64 rx_discard_vport_down, tx_discard_vport_down;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        u64 bytes = 0;
        int err = 0;

        if (esw->mode != MLX5_ESWITCH_LEGACY)
                return 0;

        mutex_lock(&esw->state_lock);
        if (!vport->enabled)
                goto unlock;

        if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
                mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
                              &stats->rx_dropped, &bytes);

        if (vport->ingress.legacy.drop_counter)
                mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
                              &stats->tx_dropped, &bytes);

        if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
            !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
                goto unlock;

        err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
                                          &rx_discard_vport_down,
                                          &tx_discard_vport_down);
        if (err)
                goto unlock;

        if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
                stats->rx_dropped += rx_discard_vport_down;
        if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
                stats->tx_dropped += tx_discard_vport_down;

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

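/* Legacy-mode VF VLAN (VST) configuration.  A non-zero VLAN or QoS enables
 * both strip and insert offloads; clearing the VLAN while not in legacy
 * mode is silently accepted for compatibility with libvirt, which sets
 * "vlan 0" unconditionally.
 */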
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                u16 vport, u16 vlan, u8 qos)
{
        u8 set_flags = 0;
        int err = 0;

        if (!mlx5_esw_allowed(esw))
                return vlan ? -EPERM : 0;

        if (vlan || qos)
                set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

        mutex_lock(&esw->state_lock);
        if (esw->mode != MLX5_ESWITCH_LEGACY) {
                if (!vlan)
                        goto unlock; /* compatibility with libvirt */

                err = -EOPNOTSUPP;
                goto unlock;
        }

        err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

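/* VF spoof-check configuration.  The new setting takes effect by
 * rebuilding the vport's legacy ingress ACL; if that fails the previous
 * value is restored.
 */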
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    u16 vport, bool spoofchk)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        bool pschk;
        int err = 0;

        if (!mlx5_esw_allowed(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        mutex_lock(&esw->state_lock);
        if (esw->mode != MLX5_ESWITCH_LEGACY) {
                err = -EOPNOTSUPP;
                goto unlock;
        }
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
        if (pschk && !is_valid_ether_addr(evport->info.mac))
                mlx5_core_warn(esw->dev,
                               "Spoofchk in set while MAC is invalid, vport(%d)\n",
                               evport->vport);
        if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
                err = esw_acl_ingress_lgcy_setup(esw, evport);
        if (err)
                evport->info.spoofchk = pschk;

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 u16 vport, bool setting)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        int err = 0;

        if (!mlx5_esw_allowed(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        mutex_lock(&esw->state_lock);
        if (esw->mode != MLX5_ESWITCH_LEGACY) {
                err = -EOPNOTSUPP;
                goto unlock;
        }
        evport->info.trusted = setting;
        if (evport->enabled)
                esw_vport_change_handle_locked(evport);

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

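/* VF rate configuration: hand the per-vport max/min rate request off to
 * the eswitch QoS code.
 */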
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
                                u32 max_rate, u32 min_rate)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

        if (!mlx5_esw_allowed(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        return mlx5_esw_qos_set_vport_rate(evport, max_rate, min_rate);
}