/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lag/lag.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
#include "en_accel/ipsec.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u16 vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

static struct mlx5_eswitch *__mlx5_devlink_eswitch_get(struct devlink *devlink, bool check)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	if (check) {
		err = mlx5_eswitch_check(dev);
		if (err)
			return ERR_PTR(err);
	}

	return dev->priv.eswitch;
}

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	return __mlx5_devlink_eswitch_get(devlink, true);
}

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink)
{
	return __mlx5_devlink_eswitch_get(devlink, false);
}
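
/* Usage sketch (illustrative, not called from here): the checked getter
 * returns either the eswitch or an ERR_PTR(), so devlink callbacks are
 * expected to do:
 *
 *	esw = mlx5_devlink_eswitch_get(devlink);
 *	if (IS_ERR(esw))
 *		return PTR_ERR(esw);
 */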

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw)
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport || mlx5_core_is_ecpf(dev))
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
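
/* Usage sketch: esw_vport_change_handle_locked() below re-arms the command
 * with the vport's currently enabled events, while mlx5_esw_vport_disable()
 * passes a zero mask to stop event delivery:
 *
 *	arm_vport_context_events_cmd(dev, vport->vport,
 *				     vport->enabled_events);
 *	arm_vport_context_events_cmd(esw->dev, vport_num, 0);
 */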

/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
			/* insert whether or not a vlan is already present in the packet */
			MLX5_SET(modify_esw_vport_context_in, in,
				 esw_vport_context.vport_cvlan_insert,
				 MLX5_VPORT_CVLAN_INSERT_ALWAYS);
		} else {
			/* insert only if there is no vlan in the packet */
			MLX5_SET(modify_esw_vport_context_in, in,
				 esw_vport_context.vport_cvlan_insert,
				 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
		}
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
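
/* Usage sketch: esw_vport_setup() below programs VST with both flags set
 * when a vlan or qos value is configured, and clears the settings with
 * flags == 0 otherwise:
 *
 *	flags = (vport->info.vlan || vport->info.qos) ?
 *		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
 *	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
 *			       vport->info.qos, flags);
 */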

/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
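
/* Match semantics of the three rule builders above:
 * - esw_fdb_set_vport_rule(): exact DMAC match (all-ones mask).
 * - esw_fdb_set_vport_allmulti_rule(): matches only the multicast bit
 *   (mac_c[0] = mac_v[0] = 0x01), i.e. any multicast DMAC.
 * - esw_fdb_set_vport_promisc_rule(): no DMAC match at all; the rx_rule
 *   flag adds a misc match on source_port == MLX5_VPORT_UPLINK instead.
 */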

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers;
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

		esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
			  vport, mac, vaddr->flow_rule);
	}

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers;
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it is now converted back to an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync the vport's mc promisc list from the eswitch MC table.
 * Must be called after esw_update_vport_addr_list().
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(dev, log_max_l2_table))
		return;

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}
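
/* node_guid_gen_from_mac() performs a MAC-to-EUI-64 style expansion
 * (without toggling the universal/local bit): the 0xfffe marker is
 * inserted between the two MAC halves. For example, on a little-endian
 * host, MAC 00:11:22:33:44:55 produces node GUID 0x001122fffe334455.
 */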

static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return 0;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);

	if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
		goto out_free;

	memset(query_ctx, 0, query_out_sz);
	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);

	err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
out_free:
	kfree(query_ctx);
	return err;
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	err = mlx5_esw_vport_caps_get(esw, vport);
	if (err)
		goto err_caps;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
		modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
				       vport->info.qos, flags);

	return 0;

err_caps:
	esw_vport_cleanup_acl(esw, vport);
	return err;
}

/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	mlx5_esw_qos_vport_disable(vport);
	esw_vport_cleanup_acl(esw, vport);
}

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	u16 vport_num = vport->vport;
	int ret;

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;
	if (vport->vport != MLX5_VPORT_PF &&
	    (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
		esw->enabled_ipsec_vf_count++;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	mutex_lock(&esw->state_lock);

	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
		arm_vport_context_events_cmd(esw->dev, vport_num, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	if (vport->vport != MLX5_VPORT_PF &&
	    (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
		esw->enabled_ipsec_vf_count--;

	/* We don't assume VFs will clean up after themselves.
	 * Calling the vport change handler while the vport is disabled will
	 * clean up the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_apply_vport_rx_mode(esw, vport, false, false);
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev: Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns the raw "functions
 * changed" output queried from the device on success. Otherwise it returns
 * an ERR_PTR. The caller must free the memory using kvfree() when a valid
 * pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}
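
/* Usage sketch: callers parse the raw output with MLX5_GET() and free it,
 * e.g. as mlx5_eswitch_update_num_of_vfs() does below:
 *
 *	out = mlx5_esw_query_functions(dev);
 *	if (IS_ERR(out))
 *		return;
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */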

static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_esw_qos_vport_qos_free(vport);
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
		mlx5_esw_qos_vport_qos_free(vport);
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				   enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport, enabled_events);
	if (err)
		return err;

	err = mlx5_esw_offloads_load_rep(esw, vport);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_disable(esw, vport);
	return err;
}

static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	mlx5_esw_offloads_unload_rep(esw, vport);
	mlx5_esw_vport_disable(esw, vport);
}

static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
					 enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
	if (err)
		return err;

	err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
	if (err)
		goto err_load;
	return 0;

err_load:
	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
	return err;
}

static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mlx5_eswitch_unload_vport(esw, vport);
	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
}

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum)
{
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum);
	if (err)
		return err;

	err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
	if (err)
		goto err_load;

	return 0;

err_load:
	mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
	return err;
}

void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mlx5_eswitch_unload_vport(esw, vport);
	mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
	}
}

static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
					     u16 num_ec_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
	}
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}

static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_vfs,
					  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_ec_vf_vports(esw, num_ec_vfs);
	return err;
}

static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once the vport and representor are ready, take the external host PF
	 * out of the initializing state. Enabling HCA clears the
	 * iser->initializing bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	bool pf_needed;
	int ret;

	pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
		    esw->mode == MLX5_ESWITCH_LEGACY;

	/* Enable PF vport */
	if (pf_needed) {
		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
						    enabled_events);
		if (ret)
			return ret;
	}

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
		if (mlx5_core_ec_sriov_enabled(esw->dev)) {
			ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
							     enabled_events);
			if (ret)
				goto ec_vf_err;
		}
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_core_ec_sriov_enabled(esw->dev))
		mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
ec_vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	if (pf_needed)
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		if (mlx5_core_ec_sriov_enabled(esw->dev))
			mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
	}

	host_pf_disable_hca(esw->dev);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
					      &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	if (mlx5_core_ec_sriov_enabled(esw->dev))
		esw->esw_funcs.num_ec_vfs = num_vfs;

	kvfree(out);
}

static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}

static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}

static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw: Pointer to eswitch
 * @num_vfs: Enable eswitch for given number of VFs. This is optional.
 *           Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *           Caller should pass num_vfs > 0 when enabling eswitch for
 *           vf vports. Caller should pass num_vfs = 0 when eswitch
 *           is enabled without sriov VFs or when the caller
 *           is unaware of the sriov state of the host PF on an ECPF based
 *           eswitch. Caller should pass < 0 when num_vfs should be
 *           completely ignored. This is typically the case when eswitch
 *           is enabled without sriov regardless of PF/ECPF system.
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
 * eswitch vports. It returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
	int err;

	devl_assert_locked(priv_to_devlink(esw->dev));

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	err = mlx5_esw_qos_init(esw);
	if (err)
		goto err_esw_init;

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_legacy_enable(esw);
	else
		err = esw_offloads_enable(esw);

	if (err)
		goto err_esw_init;

	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;

	mlx5_eswitch_event_handler_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, esw->mode);

	return 0;

err_esw_init:
	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}

/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw: Pointer to eswitch
 * @num_vfs: Enable eswitch for the given number of VFs.
 *           Caller must pass num_vfs > 0 when enabling eswitch for
 *           vf vports.
 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	bool toggle_lag;
	int ret = 0;

	if (!mlx5_esw_allowed(esw))
		return 0;

	devl_assert_locked(priv_to_devlink(esw->dev));

	toggle_lag = !mlx5_esw_is_fdb_created(esw);

	if (toggle_lag)
		mlx5_lag_disable_change(esw->dev);

	if (!mlx5_esw_is_fdb_created(esw)) {
		ret = mlx5_eswitch_enable_locked(esw, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
			MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		/* If this is the ECPF the number of host VFs is managed via the
		 * eswitch function change event handler, and any num_vfs provided
		 * here are intended to be EC VFs.
		 */
		if (!mlx5_core_is_ecpf(esw->dev)) {
			ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
			if (!ret)
				esw->esw_funcs.num_vfs = num_vfs;
		} else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
			ret = mlx5_eswitch_load_ec_vf_vports(esw, num_vfs, vport_events);
			if (!ret)
				esw->esw_funcs.num_ec_vfs = num_vfs;
		}
	}

	if (toggle_lag)
		mlx5_lag_enable_change(esw->dev);

	return ret;
}

/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	/* If driver is unloaded, this function is called twice by remove_one()
	 * and mlx5_unload(). Prevent the second call.
	 */
	if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
		return;

	esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);

	if (!mlx5_core_is_ecpf(esw->dev)) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
		if (clear_vf)
			mlx5_eswitch_clear_vf_vports_info(esw);
	} else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
		if (clear_vf)
			mlx5_eswitch_clear_ec_vf_vports_info(esw);
	}

	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
		struct devlink *devlink = priv_to_devlink(esw->dev);

		devl_rate_nodes_destroy(devlink);
	}
	/* Destroy legacy fdb when disabling sriov in legacy mode. */
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_disable_locked(esw);

	if (!mlx5_core_is_ecpf(esw->dev))
		esw->esw_funcs.num_vfs = 0;
	else
		esw->esw_funcs.num_ec_vfs = 0;
}

/* Free resources for the corresponding eswitch mode. It is called by devlink
 * when changing eswitch mode or by modprobe when unloading the driver.
 */
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);

	/* Notify eswitch users that it is exiting from the current mode,
	 * so that they can do any necessary cleanup before the eswitch is
	 * disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
	mlx5_eswitch_event_handler_unregister(esw);

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);

	if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
		esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
		if (esw->mode == MLX5_ESWITCH_OFFLOADS)
			esw_offloads_disable(esw);
		else if (esw->mode == MLX5_ESWITCH_LEGACY)
			esw_legacy_disable(esw);
		mlx5_esw_acls_ns_cleanup(esw);
	}

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		devl_rate_nodes_destroy(devlink);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	mlx5_lag_disable_change(esw->dev);
	mlx5_eswitch_disable_locked(esw);
	esw->mode = MLX5_ESWITCH_LEGACY;
	mlx5_lag_enable_change(esw->dev);
}

static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}

static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}

static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}

static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}
1736
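/*
 * Populate the vports xarray for this eswitch. Entries are keyed by vport
 * number, not by index: the PF first, then host VFs, then SFs (locally
 * owned and, on an ECPF, host-PF owned), then EC VFs where EC SR-IOV is
 * enabled, the ECPF vport where it exists, and finally the uplink. The
 * MLX5_ESW_VPT_* xarray marks let later lookups filter by port type.
 */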
1737 static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
1738 {
1739 struct mlx5_core_dev *dev = esw->dev;
1740 u16 max_host_pf_sfs;
1741 u16 base_sf_num;
1742 int idx = 0;
1743 int err;
1744 int i;
1745
1746 xa_init(&esw->vports);
1747
1748 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
1749 if (err)
1750 goto err;
1751 if (esw->first_host_vport == MLX5_VPORT_PF)
1752 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
1753 idx++;
1754
1755 for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
1756 err = mlx5_esw_vport_alloc(esw, idx, idx);
1757 if (err)
1758 goto err;
1759 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
1760 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
1761 idx++;
1762 }
1763 base_sf_num = mlx5_sf_start_function_id(dev);
1764 for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
1765 err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
1766 if (err)
1767 goto err;
1768 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
1769 idx++;
1770 }
1771
1772 err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
1773 if (err)
1774 goto err;
1775 for (i = 0; i < max_host_pf_sfs; i++) {
1776 err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
1777 if (err)
1778 goto err;
1779 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
1780 idx++;
1781 }
1782
1783 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1784 int ec_vf_base_num = mlx5_core_ec_vf_vport_base(dev);
1785
1786 for (i = 0; i < mlx5_core_max_ec_vfs(esw->dev); i++) {
1787 err = mlx5_esw_vport_alloc(esw, idx, ec_vf_base_num + i);
1788 if (err)
1789 goto err;
1790 idx++;
1791 }
1792 }
1793
1794 if (mlx5_ecpf_vport_exists(dev) ||
1795 mlx5_core_is_ecpf_esw_manager(dev)) {
1796 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_ECPF);
1797 if (err)
1798 goto err;
1799 idx++;
1800 }
1801 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
1802 if (err)
1803 goto err;
1804 return 0;
1805
1806 err:
1807 mlx5_esw_vports_cleanup(esw);
1808 return err;
1809 }
1810
1811 static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
1812 struct devlink_param_gset_ctx *ctx,
1813 struct netlink_ext_ack *extack)
1814 {
1815 struct mlx5_core_dev *dev = devlink_priv(devlink);
1816
1817 if (!MLX5_ESWITCH_MANAGER(dev))
1818 return -EOPNOTSUPP;
1819
1820 if (ctx->val.vbool)
1821 return mlx5_lag_mpesw_enable(dev);
1822
1823 mlx5_lag_mpesw_disable(dev);
1824 return 0;
1825 }
1826
1827 static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
1828 struct devlink_param_gset_ctx *ctx)
1829 {
1830 struct mlx5_core_dev *dev = devlink_priv(devlink);
1831
1832 ctx->val.vbool = mlx5_lag_is_mpesw(dev);
1833 return 0;
1834 }
1835
1836 static const struct devlink_param mlx5_eswitch_params[] = {
1837 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
1838 "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
1839 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1840 mlx5_devlink_esw_multiport_get,
1841 mlx5_devlink_esw_multiport_set, NULL),
1842 };
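/*
 * Userspace view (an illustrative sketch; the PCI address is hypothetical):
 * with iproute2's devlink, the runtime-only knob registered above is
 * driven as
 *
 *	devlink dev param set pci/0000:06:00.0 name esw_multiport \
 *		value true cmode runtime
 */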
1843
1844 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1845 {
1846 struct mlx5_eswitch *esw;
1847 int err;
1848
1849 if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
1850 return 0;
1851
1852 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1853 if (!esw)
1854 return -ENOMEM;
1855
1856 err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
1857 ARRAY_SIZE(mlx5_eswitch_params));
1858 if (err)
1859 goto free_esw;
1860
1861 esw->dev = dev;
1862 esw->manager_vport = mlx5_eswitch_manager_vport(dev);
1863 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
1864
1865 esw->debugfs_root = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(dev));
1866 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1867 if (!esw->work_queue) {
1868 err = -ENOMEM;
1869 goto abort;
1870 }
1871
1872 err = mlx5_esw_vports_init(esw);
1873 if (err)
1874 goto abort;
1875
1876 dev->priv.eswitch = esw;
1877 err = esw_offloads_init(esw);
1878 if (err)
1879 goto reps_err;
1880
1881 esw->mode = MLX5_ESWITCH_LEGACY;
1882 err = mlx5_esw_qos_init(esw);
1883 if (err)
1884 goto reps_err;
1885
1886 mutex_init(&esw->offloads.encap_tbl_lock);
1887 hash_init(esw->offloads.encap_tbl);
1888 mutex_init(&esw->offloads.decap_tbl_lock);
1889 hash_init(esw->offloads.decap_tbl);
1890 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
1891 atomic64_set(&esw->offloads.num_flows, 0);
1892 ida_init(&esw->offloads.vport_metadata_ida);
1893 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
1894 mutex_init(&esw->state_lock);
1895 init_rwsem(&esw->mode_lock);
1896 refcount_set(&esw->qos.refcnt, 0);
1897
1898 esw->enabled_vports = 0;
1899 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
1900 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
1901 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
1902 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1903 else
1904 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1905 BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
1906
1907 esw_info(dev,
1908 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1909 esw->total_vports,
1910 MLX5_MAX_UC_PER_VPORT(dev),
1911 MLX5_MAX_MC_PER_VPORT(dev));
1912 return 0;
1913
1914 reps_err:
1915 mlx5_esw_vports_cleanup(esw);
1916 dev->priv.eswitch = NULL;
1917 abort:
1918 if (esw->work_queue)
1919 destroy_workqueue(esw->work_queue);
1920 debugfs_remove_recursive(esw->debugfs_root);
1921 devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
1922 ARRAY_SIZE(mlx5_eswitch_params));
1923 free_esw:
1924 kfree(esw);
1925 return err;
1926 }
1927
1928 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1929 {
1930 if (!esw)
1931 return;
1932
1933 esw_info(esw->dev, "cleanup\n");
1934
1935 mlx5_esw_qos_cleanup(esw);
1936 destroy_workqueue(esw->work_queue);
1937 WARN_ON(refcount_read(&esw->qos.refcnt));
1938 mutex_destroy(&esw->state_lock);
1939 WARN_ON(!xa_empty(&esw->offloads.vhca_map));
1940 xa_destroy(&esw->offloads.vhca_map);
1941 ida_destroy(&esw->offloads.vport_metadata_ida);
1942 mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
1943 mutex_destroy(&esw->offloads.encap_tbl_lock);
1944 mutex_destroy(&esw->offloads.decap_tbl_lock);
1945 esw_offloads_cleanup(esw);
1946 esw->dev->priv.eswitch = NULL;
1947 mlx5_esw_vports_cleanup(esw);
1948 debugfs_remove_recursive(esw->debugfs_root);
1949 devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
1950 ARRAY_SIZE(mlx5_eswitch_params));
1951 kfree(esw);
1952 }
1953
1954 /* Vport Administration */
1955 static int
1956 mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
1957 struct mlx5_vport *evport, const u8 *mac)
1958 {
1959 u16 vport_num = evport->vport;
1960 u64 node_guid;
1961 int err = 0;
1962
1963 if (is_multicast_ether_addr(mac))
1964 return -EINVAL;
1965
1966 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1967 mlx5_core_warn(esw->dev,
1968 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1969 vport_num);
1970
1971 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
1972 if (err) {
1973 mlx5_core_warn(esw->dev,
1974 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1975 vport_num, err);
1976 return err;
1977 }
1978
1979 node_guid_gen_from_mac(&node_guid, mac);
1980 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
1981 if (err)
1982 mlx5_core_warn(esw->dev,
1983 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1984 vport_num, err);
1985
1986 ether_addr_copy(evport->info.mac, mac);
1987 evport->info.node_guid = node_guid;
1988 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
1989 err = esw_acl_ingress_lgcy_setup(esw, evport);
1990
1991 return err;
1992 }
1993
1994 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1995 u16 vport, const u8 *mac)
1996 {
1997 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1998 int err = 0;
1999
2000 if (IS_ERR(evport))
2001 return PTR_ERR(evport);
2002
2003 mutex_lock(&esw->state_lock);
2004 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
2005 mutex_unlock(&esw->state_lock);
2006 return err;
2007 }
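/*
 * Caller sketch (modelled on a .ndo_set_vf_mac implementation; the
 * netdev_to_mdev() helper is illustrative, not a real API): the VF index
 * from userspace is 0-based while VF vport numbers start at 1, hence
 * the "+ 1".
 *
 *	static int set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 *	{
 *		struct mlx5_core_dev *mdev = netdev_to_mdev(netdev);
 *
 *		return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch,
 *						  vf + 1, mac);
 *	}
 */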
2008
2009 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
2010 {
2011 return xa_get_mark(&esw->vports, vport_num, mark);
2012 }
2013
2014 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
2015 {
2016 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
2017 }
2018
2019 bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
2020 {
2021 return vport_num == MLX5_VPORT_PF ||
2022 mlx5_eswitch_is_vf_vport(esw, vport_num);
2023 }
2024
2025 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
2026 {
2027 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
2028 }
2029
2030 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
2031 u16 vport, int link_state)
2032 {
2033 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2034 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
2035 int other_vport = 1;
2036 int err = 0;
2037
2038 if (!mlx5_esw_allowed(esw))
2039 return -EPERM;
2040 if (IS_ERR(evport))
2041 return PTR_ERR(evport);
2042
2043 if (vport == MLX5_VPORT_UPLINK) {
2044 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
2045 other_vport = 0;
2046 vport = 0;
2047 }
2048 mutex_lock(&esw->state_lock);
2049 if (esw->mode != MLX5_ESWITCH_LEGACY) {
2050 err = -EOPNOTSUPP;
2051 goto unlock;
2052 }
2053
2054 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
2055 if (err) {
2056 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
2057 vport, opmod, err);
2058 goto unlock;
2059 }
2060
2061 evport->info.link_state = link_state;
2062
2063 unlock:
2064 mutex_unlock(&esw->state_lock);
2065 return err;
2066 }
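/*
 * Userspace mapping (illustrative): in legacy mode this is reached via
 * .ndo_set_vf_link_state, e.g. with iproute2
 *
 *	ip link set eth0 vf 0 state disable
 *
 * where auto/enable/disable select the admin link_state written above.
 */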
2067
2068 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
2069 u16 vport, struct ifla_vf_info *ivi)
2070 {
2071 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2072 u32 max_rate, min_rate;
2073
2074 if (IS_ERR(evport))
2075 return PTR_ERR(evport);
2076
2077 memset(ivi, 0, sizeof(*ivi));
2078 ivi->vf = vport - 1;
2079
2080 mutex_lock(&esw->state_lock);
2081 ether_addr_copy(ivi->mac, evport->info.mac);
2082 ivi->linkstate = evport->info.link_state;
2083 ivi->vlan = evport->info.vlan;
2084 ivi->qos = evport->info.qos;
2085 ivi->spoofchk = evport->info.spoofchk;
2086 ivi->trusted = evport->info.trusted;
2087
2088 if (mlx5_esw_qos_get_vport_rate(evport, &max_rate, &min_rate)) {
2089 ivi->max_tx_rate = max_rate;
2090 ivi->min_tx_rate = min_rate;
2091 }
2092 mutex_unlock(&esw->state_lock);
2093
2094 return 0;
2095 }
2096
2097 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2098 u16 vport, u16 vlan, u8 qos, u8 set_flags)
2099 {
2100 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2101 bool vst_mode_steering = esw_vst_mode_is_steering(esw);
2102 int err = 0;
2103
2104 if (IS_ERR(evport))
2105 return PTR_ERR(evport);
2106 if (vlan > 4095 || qos > 7)
2107 return -EINVAL;
2108
2109 if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
2110 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2111 if (err)
2112 return err;
2113 }
2114
2115 evport->info.vlan = vlan;
2116 evport->info.qos = qos;
2117 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
2118 err = esw_acl_ingress_lgcy_setup(esw, evport);
2119 if (err)
2120 return err;
2121 err = esw_acl_egress_lgcy_setup(esw, evport);
2122 }
2123
2124 return err;
2125 }
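/*
 * Userspace mapping (illustrative): legacy-mode VST configured through the
 * PF netdev, e.g. with iproute2
 *
 *	ip link set eth0 vf 0 vlan 100 qos 3
 *
 * corresponds to vlan=100 and qos=3 here; set_flags is chosen by the
 * wrapper that calls in.
 */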
2126
2127 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2128 u16 vport_num,
2129 struct ifla_vf_stats *vf_stats)
2130 {
2131 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
2132 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2133 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
2134 struct mlx5_vport_drop_stats stats = {};
2135 int err = 0;
2136 u32 *out;
2137
2138 if (IS_ERR(vport))
2139 return PTR_ERR(vport);
2140
2141 out = kvzalloc(outlen, GFP_KERNEL);
2142 if (!out)
2143 return -ENOMEM;
2144
2145 MLX5_SET(query_vport_counter_in, in, opcode,
2146 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2147 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2148 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
2149 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2150
2151 err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
2152 if (err)
2153 goto free_out;
2154
2155 #define MLX5_GET_CTR(p, x) \
2156 MLX5_GET64(query_vport_counter_out, p, x)
2157
2158 memset(vf_stats, 0, sizeof(*vf_stats));
2159 vf_stats->rx_packets =
2160 MLX5_GET_CTR(out, received_eth_unicast.packets) +
2161 MLX5_GET_CTR(out, received_ib_unicast.packets) +
2162 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2163 MLX5_GET_CTR(out, received_ib_multicast.packets) +
2164 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2165
2166 vf_stats->rx_bytes =
2167 MLX5_GET_CTR(out, received_eth_unicast.octets) +
2168 MLX5_GET_CTR(out, received_ib_unicast.octets) +
2169 MLX5_GET_CTR(out, received_eth_multicast.octets) +
2170 MLX5_GET_CTR(out, received_ib_multicast.octets) +
2171 MLX5_GET_CTR(out, received_eth_broadcast.octets);
2172
2173 vf_stats->tx_packets =
2174 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2175 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2176 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2177 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2178 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2179
2180 vf_stats->tx_bytes =
2181 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2182 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2183 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2184 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2185 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2186
2187 vf_stats->multicast =
2188 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2189 MLX5_GET_CTR(out, received_ib_multicast.packets);
2190
2191 vf_stats->broadcast =
2192 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2193
2194 err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
2195 if (err)
2196 goto free_out;
2197 vf_stats->rx_dropped = stats.rx_dropped;
2198 vf_stats->tx_dropped = stats.tx_dropped;
2199
2200 free_out:
2201 kvfree(out);
2202 return err;
2203 }
2204
2205 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
2206 {
2207 struct mlx5_eswitch *esw = dev->priv.eswitch;
2208
2209 return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
2210 }
2211 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
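/*
 * Consumer sketch (illustrative): out-of-module users gate offloads-only
 * features on the mode without reaching into eswitch internals:
 *
 *	if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_OFFLOADS)
 *		return -EOPNOTSUPP;
 */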
2212
2213 enum devlink_eswitch_encap_mode
2214 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
2215 {
2216 struct mlx5_eswitch *esw;
2217
2218 esw = dev->priv.eswitch;
2219 return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
2220 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2221 }
2222 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
2223
2224 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2225 struct mlx5_core_dev *dev1)
2226 {
2227 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2228 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
2229 }
2230
2231 int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
2232 {
2233 return blocking_notifier_chain_register(&esw->n_head, nb);
2234 }
2235
2236 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
2237 {
2238 blocking_notifier_chain_unregister(&esw->n_head, nb);
2239 }
2240
2241 /**
2242 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
2243 * @mdev: mlx5 core device.
2244 *
2245 * Should be called by callers that allocate or use eswitch resources.
2246 *
2247 * Return: true on success (lock taken, or no eswitch to take it on), false otherwise.
2248 */
2249 bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
2250 {
2251 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2252
2253 /* e.g. VF doesn't have eswitch so nothing to do */
2254 if (!mlx5_esw_allowed(esw))
2255 return true;
2256
2257 if (down_read_trylock(&esw->mode_lock) != 0) {
2258 if (esw->eswitch_operation_in_progress) {
2259 up_read(&esw->mode_lock);
2260 return false;
2261 }
2262 return true;
2263 }
2264
2265 return false;
2266 }
2267
2268 /**
2269 * mlx5_esw_release() - Release a read lock on esw mode lock.
2270 * @mdev: mlx5 core device.
2271 */
2272 void mlx5_esw_release(struct mlx5_core_dev *mdev)
2273 {
2274 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2275
2276 if (mlx5_esw_allowed(esw))
2277 up_read(&esw->mode_lock);
2278 }
2279
2280 /**
2281 * mlx5_esw_get() - Increase esw user count.
2282 * @mdev: mlx5 core device.
2283 */
2284 void mlx5_esw_get(struct mlx5_core_dev *mdev)
2285 {
2286 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2287
2288 if (mlx5_esw_allowed(esw))
2289 atomic64_inc(&esw->user_count);
2290 }
2291
2292 /**
2293 * mlx5_esw_put() - Decrease esw user count.
2294 * @mdev: mlx5 core device.
2295 */
2296 void mlx5_esw_put(struct mlx5_core_dev *mdev)
2297 {
2298 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2299
2300 if (mlx5_esw_allowed(esw))
2301 atomic64_dec_if_positive(&esw->user_count);
2302 }
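/*
 * Reader-side recipe, a sketch composing the four helpers above
 * (do_resource_work() is a hypothetical placeholder). mlx5_esw_release()
 * may be called right after mlx5_esw_get(): the elevated user_count, not
 * the rwsem, is what holds off mode changes for the rest of the operation.
 *
 *	if (!mlx5_esw_hold(mdev))
 *		return -EBUSY;
 *	mlx5_esw_get(mdev);
 *	mlx5_esw_release(mdev);
 *	err = do_resource_work(mdev);
 *	mlx5_esw_put(mdev);
 */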
2303
2304 /**
2305 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
2306 * @esw: eswitch device.
2307 *
2308 * Should be called by the esw mode change routine.
2309 *
2310 * Return:
2311 * * >= 0 - the current esw mode if successfully locked and no users are registered.
2312 * * -EBUSY - user refcount is not 0 or a mode change is in progress.
2313 * * -EINVAL - the mode lock is already held.
2314 */
2315 int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
2316 {
2317 if (down_write_trylock(&esw->mode_lock) == 0)
2318 return -EINVAL;
2319
2320 if (esw->eswitch_operation_in_progress ||
2321 atomic64_read(&esw->user_count) > 0) {
2322 up_write(&esw->mode_lock);
2323 return -EBUSY;
2324 }
2325
2326 return esw->mode;
2327 }
2328
2329 int mlx5_esw_lock(struct mlx5_eswitch *esw)
2330 {
2331 down_write(&esw->mode_lock);
2332
2333 if (esw->eswitch_operation_in_progress) {
2334 up_write(&esw->mode_lock);
2335 return -EBUSY;
2336 }
2337
2338 return 0;
2339 }
2340
2341 /**
2342 * mlx5_esw_unlock() - Release write lock on esw mode lock
2343 * @esw: eswitch device.
2344 */
2345 void mlx5_esw_unlock(struct mlx5_eswitch *esw)
2346 {
2347 up_write(&esw->mode_lock);
2348 }
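/*
 * Writer-side sketch (illustrative), pairing with the reader recipe above:
 * a mode-change routine first drains users, then flips the mode. On
 * success, err holds the mode that was active when the lock was taken.
 *
 *	err = mlx5_esw_try_lock(esw);
 *	if (err < 0)
 *		return err;
 *	... switch eswitch mode ...
 *	mlx5_esw_unlock(esw);
 */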
2349
2350 /**
2351 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
2352 *
2353 * @dev: Pointer to core device
2354 *
2355 * Return: the total number of eswitch vports, or 0 when there is no eswitch.
2356 */
2357 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
2358 {
2359 struct mlx5_eswitch *esw;
2360
2361 esw = dev->priv.eswitch;
2362 return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
2363 }
2364 EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
2365
2366 /**
2367 * mlx5_eswitch_get_core_dev - Get the mdev device
2368 * @esw: eswitch device.
2369 *
2370 * Return: the Mellanox core device that manages the eswitch, or NULL.
2371 */
2372 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
2373 {
2374 return mlx5_esw_allowed(esw) ? esw->dev : NULL;
2375 }
2376 EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
2377
2378 bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
2379 {
2380 struct mlx5_eswitch *esw = dev->priv.eswitch;
2381
2382 if (!mlx5_esw_allowed(esw))
2383 return true;
2384
2385 mutex_lock(&esw->state_lock);
2386 if (esw->enabled_ipsec_vf_count) {
2387 mutex_unlock(&esw->state_lock);
2388 return false;
2389 }
2390
2391 dev->num_ipsec_offloads++;
2392 mutex_unlock(&esw->state_lock);
2393 return true;
2394 }
2395
2396 void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
2397 {
2398 struct mlx5_eswitch *esw = dev->priv.eswitch;
2399
2400 if (!mlx5_esw_allowed(esw))
2401 /* Failure means no eswitch => core dev is not a PF */
2402 return;
2403
2404 mutex_lock(&esw->state_lock);
2405 dev->num_ipsec_offloads--;
2406 mutex_unlock(&esw->state_lock);
2407 }
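/*
 * Pairing sketch (illustrative): an IPsec full-offload setup path takes
 * the block before programming offloads and releases it on teardown.
 * mlx5_eswitch_block_ipsec() returns false when some VF already has IPsec
 * enabled, in which case the caller must back off.
 *
 *	if (!mlx5_eswitch_block_ipsec(mdev))
 *		return -EBUSY;
 *	... create IPsec offload objects ...
 *	mlx5_eswitch_unblock_ipsec(mdev);
 */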
2408