// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "fs_ft_pool.h"
#include "esw/qos.h"

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

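/* Create the dedicated VEPA flow table at LEGACY_VEPA_PRIO. Two FTEs and
 * two autogroups are enough for the two rules installed by
 * _mlx5_eswitch_set_vepa_locked() below: the uplink rule and the star rule.
 */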
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

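/* Tear down the legacy FDB table and its flow groups. Each group pointer
 * is checked before destruction, so this is safe to call on a partially
 * created table from the error path of esw_create_legacy_fdb_table().
 */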
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
	atomic64_set(&esw->user_count, 0);
}

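/* Create the legacy FDB table and its three flow groups. The groups are
 * laid out so that more specific matches occupy the lower flow indexes:
 *   [0 .. size-3]: full DMAC match (unicast/multicast addresses)
 *   [size-2]:      allmulti, matching only the multicast bit of the DMAC
 *   [size-1]:      promisc, matching on the source vport alone
 */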
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft_attr.max_fte = POOL_NEXT_SIZE;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;
	table_size = fdb->max_fte;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;
	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

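/* Reset the legacy FDB bookkeeping and create the FDB table. The VEPA
 * table is not created here; it is created lazily when VEPA is enabled.
 */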
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
	atomic64_set(&esw->user_count, 0);

	return esw_create_legacy_fdb_table(esw);
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

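/* Switch the eswitch into legacy SR-IOV mode: create the legacy FDB,
 * default every VF link state to "auto" and enable the PF/VF vports with
 * address-change and promisc-change event handling.
 */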
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

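/* Reverse of esw_legacy_enable(): disable the PF/VF vports, remove the
 * multicast promisc uplink rule if present and destroy the legacy tables.
 */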
void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

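/* Install (setting != 0) or remove (setting == 0) the VEPA rules. In VEPA
 * mode, traffic sourced at the uplink is steered into the regular legacy
 * FDB, while everything else hits the lower-priority star rule and is
 * hairpinned back to the uplink so the adjacent bridge makes the
 * forwarding decision. Caller must hold esw->state_lock.
 */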
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		esw_destroy_legacy_vepa_table(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Uplink rule forwards uplink traffic to the legacy FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

	/* Star rule to forward all traffic to uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
	kvfree(spec);
	if (err) {
		esw_cleanup_vepa_rules(esw);
		esw_destroy_legacy_vepa_table(esw);
	}
	return err;
}

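/* External entry point for toggling VEPA (typically reached via the
 * ndo_bridge_setlink path); only valid while the eswitch is in legacy
 * mode with its FDB created.
 */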
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

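/* Report the current VEPA state; the presence of the uplink rule doubles
 * as the "VEPA enabled" flag.
 */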
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

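/* Set up the ingress and egress ACL tables for a vport, unwinding the
 * ingress ACL if the egress setup fails.
 */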
int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int ret;

	/* Only non-manager vports need ACL in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

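/* Collect drop statistics for a vport. Note the direction swap: the
 * egress ACL drop counter feeds rx_dropped and the ingress one feeds
 * tx_dropped, since ACL direction is named from the eswitch side of the
 * vport. Firmware per-vport "discard while down" counters are folded in
 * when the corresponding capabilities are present.
 */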
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
				    struct mlx5_vport *vport,
				    struct mlx5_vport_drop_stats *stats)
{
	u64 rx_discard_vport_down, tx_discard_vport_down;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

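/* Set VLAN insertion/stripping for a vport (typically the ndo_set_vf_vlan
 * path). Only supported in legacy mode; clearing the VLAN in other modes
 * is silently accepted for compatibility with libvirt.
 */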
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return vlan ? -EPERM : 0;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		if (!vlan)
			goto unlock; /* compatibility with libvirt */

		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

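/* Toggle MAC spoof checking for a vport by rebuilding its ingress ACL;
 * the previous setting is restored if the ACL setup fails.
 */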
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

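/* Mark a vport as trusted and re-run the vport change handler so the new
 * trust level takes effect immediately.
 */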
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

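/* Configure min/max TX rate limiting for a vport through the eswitch QoS
 * infrastructure.
 */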
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	return mlx5_esw_qos_set_vport_rate(evport, max_rate, min_rate);
}