/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

#define UPLINK_VPORT 0xFFFF

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(dev, format, ...) \
	printf("mlx5_core: INFO: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...) \
	printf("mlx5_core: WARN: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD = 1,
	MLX5_ACTION_DEL = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32 table_index;
	u32 vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u32 vport;
	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)

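/*
 * Ask firmware to raise an event whenever the given vport changes its
 * UC/MC address lists, and re-arm the one-shot change notification.
 * The events_mask is built from UC_ADDR_CHANGE/MC_ADDR_CHANGE above.
 */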
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
				       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);

	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

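/*
 * Read back the current VST (port VLAN) setting of a vport.  The VLAN id
 * and PCP are reported only when the firmware says cvlan strip or insert
 * is active; otherwise *vlan/*qos are left at 0.
 */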
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
	int err;
	bool cvlan_strip;
	bool cvlan_insert;

	*vlan = 0;
	*qos = 0;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
	if (err)
		goto out;

	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
			       esw_vport_context.vport_cvlan_strip);

	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.vport_cvlan_insert);

	if (cvlan_strip || cvlan_insert) {
		*vlan = MLX5_GET(query_esw_vport_context_out, out,
				 esw_vport_context.cvlan_id);
		*qos = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.cvlan_pcp);
	}

	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
		  vport, *vlan, *qos);
out:
	return err;
}

static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

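/*
 * Program VST for a vport: when 'set' is true, enable cvlan strip and
 * cvlan insert (insert happens only if the packet carries no VLAN
 * already) with the given vlan/qos; when 'set' is false, clear both so
 * the vport passes VLAN tags through untouched.
 */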
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, bool set)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
		  vport, vlan, qos, set);

	if (set) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */
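/*
 * Install an exact-match FDB rule that forwards frames with the given
 * destination MAC to 'vport'.  Returns NULL (never ERR_PTR) on failure
 * so callers can store the result unconditionally.
 */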
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	u8 *dmac_v;
	u8 *dmac_c;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		printf("mlx5_core: WARN: ""FDB: Failed to alloc flow spec\n");
		goto out;
	}
	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	ether_addr_copy(dmac_v, mac);
	/* Match criteria mask */
	memset(dmac_c, 0xff, ETH_ALEN);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR_OR_NULL(flow_rule)) {
		printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
		       dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
out:
	kfree(spec);
	return flow_rule;
}

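/*
 * Build the SR-IOV slow-path FDB: one flow table in the FDB namespace
 * with a single flow group matching on the full destination MAC,
 * spanning the whole table.  All UC/MC forwarding rules for all vports
 * land in this group.
 */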
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* (-2) Since MaorG said so .. */
	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;

	ft_attr.prio = FDB_SLOW_PATH;
	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 table_size - 1);
	eth_broadcast_addr(dmac);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}

	esw->fdb_table.addr_grp = g;
	esw->fdb_table.fdb = fdb;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(fdb))
		mlx5_destroy_flow_table(fdb);
	return err;
}

static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

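/*
 * Claim a unicast MAC for a vport: the address is recorded in the UC
 * hash (one owner per MAC), pushed into the MPFS L2 table, and, when
 * SR-IOV is active, a forwarding rule towards the vport is added to the
 * FDB.
 */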
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);

	mlx5_del_flow_rules(&vaddr->flow_rule);

	l2addr_hash_del(esw_uc);
	return 0;
}

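/*
 * Multicast MACs are reference-counted: the first vport subscribing to
 * a MAC also installs the rule forwarding it to the uplink; every
 * subscriber gets its own FDB rule towards its vport.  No-op while
 * SR-IOV is off (no FDB exists yet).
 */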
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
add:
	esw_mc->refcnt++;
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	mlx5_del_flow_rules(&vaddr->flow_rule);

	if (--esw_mc->refcnt)
		return 0;

	mlx5_del_flow_rules(&esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	/* Assume every cached entry is stale; entries still present in the
	 * vport context are switched back to NONE (keep) or ADD below.
	 */
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;	/* don't leak mac_list on query failure */
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

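/*
 * Work queue handler scheduled on a NIC_VPORT_CHANGE event: re-read the
 * vport's UC/MC lists, apply the delta to the MPFS/FDB, and re-arm the
 * firmware event while the vport is still enabled.
 */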
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_MC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

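/*
 * Egress ACL layout (2 FTEs): flow index 0 belongs to the "allowed
 * vlans" group (match on cvlan_tag + first_vid), index 1 to the
 * catch-all drop group.  esw_vport_egress_config() fills both entries.
 */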
static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev,
	    MLX5_FLOW_NAMESPACE_ESW_EGRESS, vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	ft_attr.max_fte = table_size;
	if (vport->vport)
		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
	acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	mlx5_del_flow_rules(&vport->egress.allowed_vlan);
	mlx5_del_flow_rules(&vport->egress.drop_rule);
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n",
		  vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

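/*
 * Ingress ACL layout (1 FTE): a single group matching on cvlan_tag,
 * used by esw_vport_ingress_config() to drop already-tagged traffic
 * when VST is active.
 */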
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int table_size = 1;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev,
	    MLX5_FLOW_NAMESPACE_ESW_INGRESS, vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	ft_attr.max_fte = table_size;
	if (vport->vport)
		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
	acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->ingress.acl = acl;
	vport->ingress.drop_grp = g;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	mlx5_del_flow_rules(&vport->ingress.drop_rule);
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n",
		  vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
}

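/*
 * (Re)install the VST ingress rule: when a VLAN/qos is configured for
 * the vport, any packet entering the switch from the VF that already
 * carries a customer VLAN tag is dropped (the E-Switch inserts the tag
 * itself).
 */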
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value,
			 outer_headers.cvlan_tag);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	vport->ingress.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n",
		       vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	kfree(spec);
	return err;
}

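/*
 * (Re)install the VST egress pair: rule 0 allows frames tagged with the
 * configured VLAN id, rule 1 drops everything else.
 */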
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->egress.acl)) {
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, egress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
		 vport->vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;

	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
		       vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop everything else (no match spec) */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	vport->egress.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, NULL,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n",
		       vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kfree(spec);
	return err;
}

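/*
 * Bring a vport under E-Switch control: set up ACLs (VFs only), move
 * the admin state to AUTO, run one synchronous change-handler pass to
 * pick up the current address lists, then arm firmware events.
 */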
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_enable_ingress_acl(esw, vport);
		esw_vport_enable_egress_acl(esw, vport);
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	esw_vport_change_handler(&vport->vport_change_handler);

	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = true;
	spin_unlock_irqrestore(&vport->lock, flags);

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&vport->state_lock);
}

static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_node *tmp;
	int hi;

	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC);

	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC);
}

static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	if (!vport->enabled) {
		mutex_unlock(&vport->state_lock);
		return;
	}

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = false;
	vport->enabled_events = 0;
	spin_unlock_irqrestore(&vport->lock, flags);

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	/* We don't assume VFs will cleanup after themselves */
	esw_cleanup_vport(esw, vport_num);
	if (vport_num) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&vport->state_lock);
}

/* Public E-Switch API */
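/*
 * Typical call order (sketch, using the entry points defined below):
 *
 *	mlx5_eswitch_init(dev, total_vports);	- probe: PF vport 0 enabled
 *	mlx5_eswitch_enable_sriov(esw, nvfs);	- create FDB, enable vports 0..nvfs
 *	mlx5_eswitch_disable_sriov(esw);	- tear down, PF back to non-SRIOV mode
 *	mlx5_eswitch_cleanup(esw);		- detach
 */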
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
{
	int err;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -ENOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);

	/* The PF vport is re-enabled with full SRIOV events below */
	esw_disable_vport(esw, 0);

	err = esw_create_fdb_table(esw);
	if (err)
		goto abort;

	/* Vport 0 is the PF; 1..nvfs are the VFs */
	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	return err;
}

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
		 esw->enabled_vports);

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	esw_destroy_fdb_table(esw);

	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev, int total_vports)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
		spin_lock_init(&vport->lock);
		mutex_init(&vport->state_lock);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "cleanup\n");
	esw_disable_vport(esw, 0);

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
}

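/*
 * Dispatch path for NIC_VPORT_CHANGE EQEs, called from the EQ handler:
 * it only queues the per-vport work item; the heavy lifting happens in
 * esw_vport_change_handler().
 */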
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		printf("mlx5_core: WARN: ""MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
		       vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	spin_lock(&vport->lock);
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
	spin_unlock(&vport->lock);
}

/* Vport Administration */
#define ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

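/*
 * Derive an IB node GUID from a MAC address using the standard
 * MAC-to-EUI-64 expansion: 0xff, 0xfe is inserted between the OUI and
 * the NIC-specific bytes.  Example: MAC 00:11:22:33:44:55 yields GUID
 * 0011:22ff:fe33:4455.
 */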
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}


int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	int err = 0;
	u64 node_guid;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_node_guid vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	return mlx5_modify_vport_admin_state(esw->dev,
					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					     vport, link_state);
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct mlx5_esw_vport_info *ivi)
{
	u16 vlan;
	u8 qos;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
		vport);
	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
	ivi->vlan = vlan;
	ivi->qos = qos;
	ivi->spoofchk = 0;

	return 0;
}

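/*
 * Set (or clear, with vlan=0 qos=0) the VST configuration of a vport:
 * program cvlan strip/insert in the vport context, then refresh the
 * ingress/egress ACL rules if the vport is currently enabled.
 */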
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	struct mlx5_vport *evport;
	int err = 0;
	int set = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan || qos)
		set = 1;

	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
	if (err)
		return err;

	mutex_lock(&evport->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
		esw_vport_ingress_config(esw, evport);
		esw_vport_egress_config(esw, evport);
	}
	mutex_unlock(&evport->state_lock);
	return err;
}