/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"
#include "fw_reset.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

#define MLX5_ESW_MAX_CTRL_EQS 4
#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

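/* Hint the firmware where a packet matching this rule enters the eswitch:
 * the uplink wire, a local vport, or a TC internal port. The flow_source
 * field lets steering apply source-specific handling to the rule.
 */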
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Strictly speaking, only the upper 16 bits of reg c0 need to be cleared,
 * but the lower 16 bits are not used later in this flow either, so clear
 * them all for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

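/* Encode the rule's source port in the match criteria. Two schemes exist:
 * with metadata matching enabled the source is carried in metadata register
 * c0, otherwise the rule matches directly on the source vport number (plus
 * the owning VHCA id on merged eswitches). Illustratively, the metadata
 * scheme makes the rule match:
 *
 *	reg_c_0 & mlx5_eswitch_get_vport_metadata_mask() ==
 *		mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport)
 */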
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because a vport is not represented by a
	 * single VHCA in dual-port RoCE mode, so matching on source vport
	 * may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (!attr->chain && esw_attr && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
		   int i)
{
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest[i].range.min = 0;
	dest[i].range.max = meter->params.mtu;
	dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
	dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

	return 0;
}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* An indirect table is supported only for flows whose in_port is the
	 * uplink and whose destinations are vports on the same eswitch as
	 * the uplink; return false if at least one destination does not meet
	 * these criteria.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].vport_valid &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
{
	return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
}

static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
					      struct mlx5_esw_flow_attr *esw_attr,
					      int attr_idx)
{
	if (esw->offloads.ft_ipsec_tx_pol &&
	    esw_attr->dests[attr_idx].vport_valid &&
	    esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
	    /* To be aligned with software, encryption is needed only for a tunnel device */
	    (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
	    esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
	    esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
		return true;

	return false;
}

static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
					   struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	if (!esw->offloads.ft_ipsec_tx_pol)
		return true;

	for (i = 0; i < esw_attr->split_count; i++)
		if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
			return false;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
		    (esw_attr->out_count - esw_attr->split_count > 1))
			return false;

	return true;
}

static void
esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
			 int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_is_mpesw(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static void
esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
			 int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	if (pkt_reformat &&
	    esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
		esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
					 attr_idx, dest_idx, pkt_reformat);
	else
		esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
					 attr_idx, dest_idx, pkt_reformat);
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

static bool
esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
	bool internal_dest = false, external_dest = false;
	int i;

	for (i = 0; i < max_dest; i++) {
		if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
			continue;

		/* An uplink dest is external, but it is considered internal
		 * if a reformat is attached, because firmware then uses
		 * LB+hairpin to support it.
		 */
		if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
		    !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
			external_dest = true;
		else
			internal_dest = true;

		if (internal_dest && external_dest)
			return true;
	}

	return false;
}

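/* Populate the destination array for a rule according to its attributes,
 * checked in this order: slow path, sampler, accept, MTU check, indirect
 * table, chain source-port rewrite, then plain vport/table/chain
 * destinations, with an optional extra split table appended last.
 */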
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
		goto out;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

	if (attr->extra_split_ft) {
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[*i].ft = attr->extra_split_ft;
		(*i)++;
	}

out:
	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}

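/* Build and install an offloaded FDB rule: resolve the destination list,
 * apply VLAN push, counter, modify-header and meter actions, pick the
 * target table (per-vport mirror table for split rules, chain/prio table
 * otherwise) and add the rule, via a termination table if one is required.
 */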
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return ERR_PTR(-EOPNOTSUPP);

	if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
		return ERR_PTR(-EOPNOTSUPP);

	dest = kzalloc_objs(*dest, MLX5_MAX_FLOW_FWD_VPORTS + 1);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}

		/* Header rewrite with combined wire+loopback in FDB is not allowed */
		if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
		    esw_dests_to_int_external(dest, i)) {
			esw_warn(esw->dev,
				 "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
			rule = ERR_PTR(-EINVAL);
			goto err_esw_get;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = attr->counter;
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (!i) {
		kfree(dest);
		dest = NULL;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

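/* Install the fast-path half of a split (mirror) rule: a rule in the chain
 * table that forwards to the mirror destinations plus a per-vport table in
 * which the remainder of the original rule's actions are applied.
 */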
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kzalloc_objs(*dest, MLX5_MAX_FLOW_FWD_VPORTS + 1);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			/* Source port rewrite (forward to an ovs internal
			 * port or stack device) isn't supported in the rule
			 * of a split action.
			 */
			err = -EOPNOTSUPP;
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

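/* Tear down a rule added by one of the helpers above, releasing any
 * termination tables, per-vport tables and chain tables it references.
 */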
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

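/* Add a slow-path rule that steers packets sent from a given SQ of the
 * eswitch manager (e.g. a representor's send queues) to the vport the
 * representor stands for, matching on source SQN plus the source port.
 */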
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;
	u16 vport;

	spec = kvzalloc_obj(*spec);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	/* source vport is the esw manager */
	vport = from_esw->manager_vport;

	if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(from_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK &&
	    on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
		dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
		flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	} else {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = rep->vport;
		dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
		dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
	    rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n",
			 flow_rule);
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n",
			 vport_num, flow_rule);

	kvfree(spec);
	return flow_rule;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

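/* Enable or disable copying of reg c0 (and, when supported, reg c1) from
 * the FDB domain to the vport on loopback, which is required for metadata
 * based source-port matching and tunnel restoration.
 */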
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

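/* On a multi-port setup, add one miss rule per vport of the peer device so
 * that traffic sourced from a peer vport which misses in this FDB is
 * forwarded to the peer's eswitch manager rather than dropped.
 */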
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_vport *peer_vport;
	struct mlx5_flow_spec *spec;
	int err, pfindex;
	unsigned long i;
	void *misc;

	if (!MLX5_VPORT_MANAGER(peer_dev) &&
	    !mlx5_core_is_ecpf_esw_manager(peer_dev))
		return 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc_objs(*flows, peer_esw->total_vports);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
	    mlx5_esw_host_functions_enabled(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
						   MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[peer_vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[peer_vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
				   mlx5_core_max_vfs(peer_dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
						   peer_vport->vport);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[peer_vport->index] = flow;
	}

	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
					      mlx5_core_max_ec_vfs(peer_dev)) {
			esw_set_peer_miss_rule_source_port(esw, peer_esw,
							   spec,
							   peer_vport->vport);
			flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						   spec, &flow_act, &dest, 1);
			if (IS_ERR(flow)) {
				err = PTR_ERR(flow);
				goto add_ec_vf_flow_err;
			}
			flows[peer_vport->index] = flow;
		}
	}

	pfindex = mlx5_get_dev_index(peer_dev);
	if (pfindex >= MLX5_MAX_PORTS) {
		esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
			 pfindex, MLX5_MAX_PORTS);
		err = -EINVAL;
		goto add_ec_vf_flow_err;
	}
	esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;

	kvfree(spec);
	return 0;

add_ec_vf_flow_err:
	mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
				      mlx5_core_max_ec_vfs(peer_dev)) {
		if (!flows[peer_vport->index])
			continue;
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}
add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
				   mlx5_core_max_vfs(peer_dev)) {
		if (!flows[peer_vport->index])
			continue;
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}
	if (mlx5_ecpf_vport_exists(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
	    mlx5_esw_host_functions_enabled(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
{
	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
	u16 peer_index = mlx5_get_dev_index(peer_dev);
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *peer_vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
	if (!flows)
		return;

	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
					      mlx5_core_max_ec_vfs(peer_dev))
			mlx5_del_flow_rules(flows[peer_vport->index]);
	}

	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
				   mlx5_core_max_vfs(peer_dev))
		mlx5_del_flow_rules(flows[peer_vport->index]);

	if (mlx5_ecpf_vport_exists(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
	    mlx5_esw_host_functions_enabled(peer_dev)) {
		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[peer_vport->index]);
	}

	kvfree(flows);
	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}

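/* Install the two lowest-priority FDB miss rules (see MLX5_ESW_MISS_FLOWS):
 * one for unicast dst mac misses and one for multicast, both forwarding to
 * the eswitch manager vport where slow-path handling takes over.
 */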
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc_obj(*spec);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

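/* Add a rule to the restore table matching a mapped tag carried in reg c0:
 * it re-attaches the tag as the flow tag so software can identify the flow,
 * applies the restore copy modify-header, and forwards the packet to the
 * offloads table.
 */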
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
				    u32 *flow_group_in,
				    int match_params)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS | match_params);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
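/* Acquire/release the per-vport mirror tables for every vport; these stand
 * in for chains and priorities when the device cannot offload them.
 */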
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when the ttl workaround is needed, e.g. when
		 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
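
/* Illustrative summary (a sketch, not authoritative): the ladder above
 * effectively computes
 *
 *	prios_supported = (multi_fdb_encap || encap == NONE) &&
 *			  reg_c1_loopback_enabled &&
 *			  fdb_modify_header_fwd_to_table;
 *
 * i.e. chains/prios offload requires all three conditions whenever tunnel
 * encap is enabled, and the outcome is reported once via esw_warn/esw_info.
 */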

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	int err;

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;
	attr.fs_base_prio = FDB_BYPASS_PATH;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}
	mlx5_chains_print_info(chains);

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the flow table for the always-created end chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios aren't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}
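
/* Worked example (a sketch; the numbers are illustrative, not from the
 * source): with MLX5_MAX_PORTS == 2, total_vports == 10, MAX_SQ_NVPORTS == 32
 * and MAX_PF_SQ == 256, the send-to-vport group spans
 *
 *	count = 2 * (10 * 32 + 256) = 1152
 *
 * flow entries starting at index 0, so *ix advances to 1152 for the groups
 * created after it.
 */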

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + max_peer_ports);
	*ix += max_peer_ports + 1;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but that is not possible, as the peer device might not be in
	 * switchdev mode. We rely on the fact that, by default, FW sets max
	 * vfs and max sfs to the same value on both devices. If this needs to
	 * change in the future, note that the peer miss group should also be
	 * created based on the number of total vports of the peer (currently
	 * it also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
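
	/* Worked example (illustrative numbers, assuming MLX5_MAX_PORTS == 2
	 * and total_vports == 10): the send-to-vport region is
	 * 2 * (10 * 32 + 256) = 1152 entries, the send-to-vport-meta and
	 * peer-miss regions together are bounded by 10 * 2 = 20 entries, and
	 * MLX5_ESW_MISS_FLOWS adds 2, giving table_size = 1174.
	 */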

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
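
/* Illustrative layout sketch (not authoritative): after the calls above, the
 * slow-path FDB is carved into consecutive flow groups,
 *
 *	[ send-to-vport | send-to-vport-meta | peer-miss | miss ]
 *
 * with `ix` advancing past each region, except the send-to-vport group,
 * which always starts at index 0. The miss group is last and holds the
 * MLX5_ESW_MISS_FLOWS match-all rules.
 */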

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_get_slow_fdb(esw))
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* The ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE
	 * (1) for the drop rule, which is placed at the end of the table.
	 * So return the total number of vports and int_ports as the rule
	 * index.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}
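
/* Worked example (illustrative, assuming total_vports == 10 and int_port
 * support with MLX5E_TC_MAX_INT_PORT_NUM == 32): the rx group covers indices
 * 0..(10 + 2 + 32 - 1) = 0..43 and the drop rule lands at index 44, the last
 * entry of the table sized 44 + MLX5_ESW_FT_OFFLOADS_DROP_RULE.
 */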

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int flow_index;
	int err = 0;

	flow_index = esw_create_vport_rx_drop_rule_index(esw);

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_drop_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_group)
		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
			      u16 vport,
			      struct mlx5_flow_spec *spec)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}
}
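
/* Usage sketch (illustrative, not part of the driver): a caller allocates a
 * zeroed spec, stamps the source port, and adds a rule against it, e.g.:
 *
 *	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *
 *	if (spec) {
 *		mlx5_esw_set_spec_source_port(esw, vport_num, spec);
 *		... pass spec to mlx5_add_flow_rules() ...
 *		kvfree(spec);
 *	}
 *
 * mlx5_eswitch_create_vport_rx_rule() below is exactly this pattern.
 */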

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc_obj(*spec);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	mlx5_esw_set_spec_source_port(esw, vport, spec);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx rule err %pe\n",
			 flow_rule);
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
					&flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %pe\n",
			 flow_rule);
		return PTR_ERR(flow_rule);
	}

	esw->offloads.vport_rx_drop_rule = flow_rule;

	return 0;
}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_rule)
		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
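
/* Illustrative note (a sketch of the mechanism, not authoritative): the
 * restore table matches the user-data bits of reg_c_0 and, via the copy
 * modify-header built above, mirrors reg_c_1 into reg_B so that software
 * can recover per-flow state after a miss. Conceptually, each restore entry
 * behaves like:
 *
 *	if ((reg_c_0 & ESW_REG_C0_USER_DATA_METADATA_MASK) == tag)
 *		reg_b = reg_c_1;  then forward towards the vport rx tables
 */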

static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
{
	mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
	if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
	    mlx5_core_mp_enabled(esw->dev)) {
		esw->mode = mode;
		mlx5_rescan_drivers_locked(esw->dev);
		mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
		return;
	}

	esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(esw->dev);
	esw->mode = mode;
	esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(esw->dev);
	mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
}

static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.drop_root)
		return;

	esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n",
		  esw->fdb_table.offloads.drop_root->id,
		  esw->fdb_table.offloads.drop_root_fc->id);
	mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule);
	/* Don't free the flow counter here; it can be reused on a later
	 * activation.
	 */
	mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root);
	esw->fdb_table.offloads.drop_root_rule = NULL;
	esw->fdb_table.offloads.drop_root = NULL;
}

static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination drop_fc_dst = {};
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_table *table;
	int err = 0, dst_num = 0;

	if (esw->fdb_table.offloads.drop_root)
		return 0;

	root_ns = esw->fdb_table.offloads.ns;

	ft_attr.prio = FDB_DROP_ROOT;
	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(table)) {
		esw_warn(dev, "Failed to create fdb drop root table, err %pe\n",
			 table);
		return PTR_ERR(table);
	}

	/* The drop flow counter is reusable; create it once, on the first
	 * deactivation of the FDB.
	 */
	if (!esw->fdb_table.offloads.drop_root_fc) {
		struct mlx5_fc *counter = mlx5_fc_create(dev, 0);

		err = PTR_ERR_OR_ZERO(counter);
		if (err)
			esw_warn(esw->dev, "create fdb drop fc err %d\n", err);
		else
			esw->fdb_table.offloads.drop_root_fc = counter;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	if (esw->fdb_table.offloads.drop_root_fc) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc;
		dst = &drop_fc_dst;
		dst_num++;
	}

	flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num);
	err = PTR_ERR_OR_ZERO(flow_rule);
	if (err) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add fdb drop rule err %d\n",
			 err);
		goto err_flow_rule;
	}

	esw->fdb_table.offloads.drop_root = table;
	esw->fdb_table.offloads.drop_root_rule = flow_rule;
	esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n",
		  table->id, dst ? dst->counter->id : 0);
	return 0;

err_flow_rule:
	/* no need to free the drop fc, esw_offloads_steering_cleanup will do it */
	mlx5_destroy_flow_table(table);
	return err;
}

static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_fdb_drop_destroy(esw);
	mlx5_mpfs_enable(esw->dev);

	mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
		if (!vport->adjacent)
			continue;
		esw_debug(esw->dev, "Connecting vport %u to eswitch\n",
			  vport->vport);
		mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true);
	}

	esw->offloads_inactive = false;
	esw_warn(esw->dev, "MPFS/FDB active\n");
}

static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_mpfs_disable(esw->dev);
	mlx5_esw_fdb_drop_create(esw);

	mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
		if (!vport->adjacent)
			continue;
		esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n",
			  vport->vport);

		mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false);
	}

	esw->offloads_inactive = true;
	esw_warn(esw->dev, "MPFS/FDB inactive\n");
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err;

	esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS);
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
		return err;
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return 0;
}

void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
				  const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
					       vport->vport);

	if (!rep)
		return;
	xa_erase(&esw->offloads.vport_reps, vport->vport);
	kfree(rep);
}

int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
			      const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc_obj(*rep);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		if (!esw->offloads.rep_ops[rep_type]) {
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
			continue;
		}
		/* Dynamic/delegated vports add their representors after
		 * mlx5_eswitch_register_vport_reps, so mark them as registered
		 * so that they are loaded later with the others.
		 */
		rep->esw = esw;
		atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	}
	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	return 0;

insert_err:
	kfree(rep);
	return err;
}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}

static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}

static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_add(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}

static int esw_port_metadata_set(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (ctx->val.vbool)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}

static int esw_port_metadata_get(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
	return 0;
}
static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
				      union devlink_param_value val,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 esw_mode;

	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must be disabled or in non-switchdev mode");
		return -EBUSY;
	}
	return 0;
}

static const struct devlink_param esw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     esw_port_metadata_get,
			     esw_port_metadata_set,
			     esw_port_metadata_validate),
};

int esw_offloads_init(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_init_reps(esw);
	if (err)
		return err;

	if (MLX5_ESWITCH_MANAGER(esw->dev) &&
	    mlx5_esw_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	err = devl_params_register(priv_to_devlink(esw->dev),
				   esw_devlink_params,
				   ARRAY_SIZE(esw_devlink_params));
	if (err)
		goto err_params;

	return 0;

err_params:
	esw_offloads_cleanup_reps(esw);
	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	devl_params_unregister(priv_to_devlink(esw->dev),
			       esw_devlink_params,
			       ARRAY_SIZE(esw_devlink_params));
	esw_offloads_cleanup_reps(esw);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
		return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED) {
		if (rep_type == REP_ETH)
			__esw_offloads_unload_rep(esw, rep, REP_IB);
		esw->offloads.rep_ops[rep_type]->unload(rep);
	}
}
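
/* Illustrative note (a sketch, not authoritative): rep loading is a small
 * per-type state machine driven by atomic_cmpxchg, so concurrent callers
 * race safely for the single transition:
 *
 *	REP_UNREGISTERED          (no rep_ops for this type; never loaded)
 *	REP_REGISTERED --load-->  REP_LOADED
 *	REP_LOADED   --unload-->  REP_REGISTERED
 *
 * Only the caller that wins the cmpxchg invokes ->load()/->unload();
 * everyone else sees the state already moved and returns.
 */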

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
}

void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
}

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum)
{
	return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
}

void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
}

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	err = mlx5_esw_offloads_devlink_port_register(esw, vport);
	if (err)
		return err;

	err = mlx5_esw_offloads_rep_load(esw, vport->vport);
	if (err)
		goto load_err;
	return err;

load_err:
	mlx5_esw_offloads_devlink_port_unregister(vport);
	return err;
}

void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport->vport);

	mlx5_esw_offloads_devlink_port_unregister(vport);
}

static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}

static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
	} else {
		err = xa_insert(&vport->egress.offloads.bounce_rules,
				slave_index, flow_rule, GFP_KERNEL);
		if (err)
			mlx5_del_flow_rules(flow_rule);
	}

	kvfree(spec);
	return err;
}

static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
					      struct mlx5_flow_namespace *egress_ns,
					      struct mlx5_vport *vport, size_t count)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = count, .prio = 0, .level = 0,
	};
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	if (vport->egress.acl)
		return 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (vport->vport || mlx5_core_is_ecpf(esw->dev))
		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;
	vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
	xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);

	kvfree(flow_group_in);

	return 0;

err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
{
	if (!xa_empty(&vport->egress.offloads.bounce_rules))
		return;
	mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
	vport->egress.offloads.bounce_grp = NULL;
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.acl = NULL;
}

static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave, size_t count)
{
	struct mlx5_eswitch *esw = master->priv.eswitch;
	u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_namespace(master,
						  MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						  vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
		return 0;

	err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
	if (err)
		return err;

	if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
		return -EINVAL;

	err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
	if (err)
		goto err_rule;

	return 0;

err_rule:
	esw_master_egress_destroy_resources(vport);
	return err;
}

static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
					 struct mlx5_core_dev *slave_dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));

	if (xa_empty(&vport->egress.offloads.bounce_rules)) {
		esw_acl_egress_ofld_cleanup(vport);
		xa_destroy(&vport->egress.offloads.bounce_rules);
	}
}

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev, max_slaves);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
	return err;
}

void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
	esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
		}
	}
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
				     struct mlx5_eswitch *peer_esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
	esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
}

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw, peer_esw);
	return err;
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
		mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	u16 esw_i, peer_esw_i;
	bool esw_paired;
	int err;

	peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
	esw_paired = !!xa_load(&esw->paired, peer_esw_i);

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		if (esw_paired)
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
		if (err)
			goto err_xa;

		err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
		if (err)
			goto err_peer_xa;

		esw->num_peers++;
		peer_esw->num_peers++;
		mlx5_devcom_comp_set_ready(esw->devcom, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!esw_paired)
			break;

		peer_esw->num_peers--;
		esw->num_peers--;
		if (!esw->num_peers && !peer_esw->num_peers)
			mlx5_devcom_comp_set_ready(esw->devcom, false);
		xa_erase(&peer_esw->paired, esw_i);
		xa_erase(&esw->paired, peer_esw_i);
		mlx5_esw_offloads_unpair(peer_esw, esw);
		mlx5_esw_offloads_unpair(esw, peer_esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_peer_xa:
	xa_erase(&esw->paired, peer_esw_i);
err_xa:
	mlx5_esw_offloads_unpair(peer_esw, esw);
err_pair:
	mlx5_esw_offloads_unpair(esw, peer_esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
3232
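/* Register this eswitch as a devcom component member so that peer
 * eswitches with matching attributes can pair with it. Pairing is only
 * attempted when the device supports a merged eswitch.
 */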
3233 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
3234 const struct mlx5_devcom_match_attr *attr)
3235 {
3236 int i;
3237
3238 for (i = 0; i < MLX5_MAX_PORTS; i++)
3239 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
3240 mutex_init(&esw->offloads.peer_mutex);
3241
3242 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
3243 return;
3244
3245 if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
3246 !mlx5_lag_is_supported(esw->dev))
3247 return;
3248
3249 xa_init(&esw->paired);
3250 esw->num_peers = 0;
3251 esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
3252 MLX5_DEVCOM_ESW_OFFLOADS,
3253 attr,
3254 mlx5_esw_offloads_devcom_event,
3255 esw);
3256 if (!esw->devcom)
3257 return;
3258
3259 mlx5_devcom_send_event(esw->devcom,
3260 ESW_OFFLOADS_DEVCOM_PAIR,
3261 ESW_OFFLOADS_DEVCOM_UNPAIR,
3262 esw);
3263 }
3264
3265 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
3266 {
3267 if (!esw->devcom)
3268 return;
3269
3270 mlx5_devcom_send_event(esw->devcom,
3271 ESW_OFFLOADS_DEVCOM_UNPAIR,
3272 ESW_OFFLOADS_DEVCOM_UNPAIR,
3273 esw);
3274
3275 mlx5_devcom_unregister_component(esw->devcom);
3276 xa_destroy(&esw->paired);
3277 esw->devcom = NULL;
3278 }
3279
3280 bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
3281 {
3282 return mlx5_devcom_comp_is_ready(esw->devcom);
3283 }
3284
3285 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
3286 {
3287 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
3288 return false;
3289
3290 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
3291 MLX5_FDB_TO_VPORT_REG_C_0))
3292 return false;
3293
3294 return true;
3295 }
3296
3297 #define MLX5_ESW_METADATA_RSVD_UPLINK 1
3298
3299 /* Share the same metadata for both uplinks. This is fine because:
3300  * (a) In shared FDB mode (LAG) both uplinks are treated the
3301  * same and tagged with the same metadata.
3302  * (b) In non-shared FDB mode, packets from physical port0
3303  * cannot hit the eswitch of PF1 and vice versa.
3304  */
3305 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
3306 {
3307 return MLX5_ESW_METADATA_RSVD_UPLINK;
3308 }
3309
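/* Allocate a unique source-port metadata value: the device index in the
 * upper ESW_PFNUM_BITS and an IDA-allocated id in the lower
 * ESW_VPORT_BITS. Returns 0 on exhaustion, as 0 is never a valid
 * metadata value.
 */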
3310 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
3311 {
3312 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
3313 /* Reserve 0xf for internal port offload */
3314 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
3315 u32 pf_num;
3316 int id;
3317
3318 /* Only 4 bits of pf_num */
3319 pf_num = mlx5_get_dev_index(esw->dev);
3320 if (pf_num > max_pf_num)
3321 return 0;
3322
3323 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
3324 /* Use only non-zero vport_id (2-4095) for all PFs */
3325 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
3326 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
3327 vport_end_ida, GFP_KERNEL);
3328 if (id < 0)
3329 return 0;
3330 id = (pf_num << ESW_VPORT_BITS) | id;
3331 return id;
3332 }
3333
3334 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
3335 {
3336 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
3337
3338 /* Metadata contains only 12 bits of the actual IDA-allocated id */
3339 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
3340 }
3341
3342 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
3343 struct mlx5_vport *vport)
3344 {
3345 if (vport->vport == MLX5_VPORT_UPLINK)
3346 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
3347 else
3348 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
3349
3350 vport->metadata = vport->default_metadata;
3351 return vport->metadata ? 0 : -ENOSPC;
3352 }
3353
3354 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3355 struct mlx5_vport *vport)
3356 {
3357 if (!vport->default_metadata)
3358 return;
3359
3360 if (vport->vport == MLX5_VPORT_UPLINK)
3361 return;
3362
3363 WARN_ON(vport->metadata != vport->default_metadata);
3364 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3365 }
3366
3367 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
3368 {
3369 struct mlx5_vport *vport;
3370 unsigned long i;
3371
3372 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3373 return;
3374
3375 mlx5_esw_for_each_vport(esw, i, vport)
3376 esw_offloads_vport_metadata_cleanup(esw, vport);
3377 }
3378
3379 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
3380 {
3381 struct mlx5_vport *vport;
3382 unsigned long i;
3383 int err;
3384
3385 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3386 return 0;
3387
3388 mlx5_esw_for_each_vport(esw, i, vport) {
3389 err = esw_offloads_vport_metadata_setup(esw, vport);
3390 if (err)
3391 goto metadata_err;
3392 }
3393
3394 return 0;
3395
3396 metadata_err:
3397 esw_offloads_metadata_uninit(esw);
3398 return err;
3399 }
3400
3401 int
3402 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
3403 struct mlx5_vport *vport)
3404 {
3405 int err;
3406
3407 err = esw_acl_ingress_ofld_setup(esw, vport);
3408 if (err)
3409 return err;
3410
3411 err = esw_acl_egress_ofld_setup(esw, vport);
3412 if (err)
3413 goto egress_err;
3414
3415 return 0;
3416
3417 egress_err:
3418 esw_acl_ingress_ofld_cleanup(esw, vport);
3419 return err;
3420 }
3421
3422 void
3423 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
3424 struct mlx5_vport *vport)
3425 {
3426 esw_acl_egress_ofld_cleanup(vport);
3427 esw_acl_ingress_ofld_cleanup(esw, vport);
3428 }
3429
3430 static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
3431 {
3432 struct mlx5_vport *uplink, *manager;
3433 int ret;
3434
3435 uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3436 if (IS_ERR(uplink))
3437 return PTR_ERR(uplink);
3438
3439 ret = esw_vport_create_offloads_acl_tables(esw, uplink);
3440 if (ret)
3441 return ret;
3442
3443 manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3444 if (IS_ERR(manager)) {
3445 ret = PTR_ERR(manager);
3446 goto err_manager;
3447 }
3448
3449 ret = esw_vport_create_offloads_acl_tables(esw, manager);
3450 if (ret)
3451 goto err_manager;
3452
3453 return 0;
3454
3455 err_manager:
3456 esw_vport_destroy_offloads_acl_tables(esw, uplink);
3457 return ret;
3458 }
3459
3460 static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
3461 {
3462 struct mlx5_vport *vport;
3463
3464 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3465 if (!IS_ERR(vport))
3466 esw_vport_destroy_offloads_acl_tables(esw, vport);
3467
3468 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3469 if (!IS_ERR(vport))
3470 esw_vport_destroy_offloads_acl_tables(esw, vport);
3471 }
3472
3473 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
3474 {
3475 struct mlx5_eswitch_rep *rep;
3476 unsigned long i;
3477 int ret;
3478
3479 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3480 return 0;
3481
3482 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3483 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3484 return 0;
3485
3486 ret = __esw_offloads_load_rep(esw, rep, REP_IB);
3487 if (ret)
3488 return ret;
3489
3490 mlx5_esw_for_each_rep(esw, i, rep) {
3491 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3492 __esw_offloads_load_rep(esw, rep, REP_IB);
3493 }
3494
3495 return 0;
3496 }
3497
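/* Create all switchdev steering objects in dependency order: ACL tables,
 * offloads table, restore table, FDB tables and the vport RX
 * group/drop rules. On error, teardown is performed in reverse order.
 */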
3498 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3499 {
3500 struct mlx5_esw_indir_table *indir;
3501 int err;
3502
3503 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3504 mutex_init(&esw->fdb_table.offloads.vports.lock);
3505 hash_init(esw->fdb_table.offloads.vports.table);
3506 atomic64_set(&esw->user_count, 0);
3507
3508 indir = mlx5_esw_indir_table_init();
3509 if (IS_ERR(indir)) {
3510 err = PTR_ERR(indir);
3511 goto create_indir_err;
3512 }
3513 esw->fdb_table.offloads.indir = indir;
3514
3515 err = esw_create_offloads_acl_tables(esw);
3516 if (err)
3517 goto create_acl_err;
3518
3519 err = esw_create_offloads_table(esw);
3520 if (err)
3521 goto create_offloads_err;
3522
3523 err = esw_create_restore_table(esw);
3524 if (err)
3525 goto create_restore_err;
3526
3527 err = esw_create_offloads_fdb_tables(esw);
3528 if (err)
3529 goto create_fdb_err;
3530
3531 err = esw_create_vport_rx_group(esw);
3532 if (err)
3533 goto create_fg_err;
3534
3535 err = esw_create_vport_rx_drop_group(esw);
3536 if (err)
3537 goto create_rx_drop_fg_err;
3538
3539 err = esw_create_vport_rx_drop_rule(esw);
3540 if (err)
3541 goto create_rx_drop_rule_err;
3542
3543 return 0;
3544
3545 create_rx_drop_rule_err:
3546 esw_destroy_vport_rx_drop_group(esw);
3547 create_rx_drop_fg_err:
3548 esw_destroy_vport_rx_group(esw);
3549 create_fg_err:
3550 esw_destroy_offloads_fdb_tables(esw);
3551 create_fdb_err:
3552 esw_destroy_restore_table(esw);
3553 create_restore_err:
3554 esw_destroy_offloads_table(esw);
3555 create_offloads_err:
3556 esw_destroy_offloads_acl_tables(esw);
3557 create_acl_err:
3558 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3559 create_indir_err:
3560 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3561 return err;
3562 }
3563
3564 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3565 {
3566 mlx5_esw_fdb_drop_destroy(esw);
3567 if (esw->fdb_table.offloads.drop_root_fc)
3568 mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc);
3569 esw->fdb_table.offloads.drop_root_fc = NULL;
3570 esw_destroy_vport_rx_drop_rule(esw);
3571 esw_destroy_vport_rx_drop_group(esw);
3572 esw_destroy_vport_rx_group(esw);
3573 esw_destroy_offloads_fdb_tables(esw);
3574 esw_destroy_restore_table(esw);
3575 esw_destroy_offloads_table(esw);
3576 esw_destroy_offloads_acl_tables(esw);
3577 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3578 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3579 }
3580
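/* Handle a change in the number of host VFs reported by the firmware.
 * Runs under the devlink lock; stale work queued before a later mode
 * change is detected via the generation counter and ignored.
 */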
3581 static void
3582 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen,
3583 const u32 *out)
3584 {
3585 struct devlink *devlink;
3586 bool host_pf_disabled;
3587 u16 new_num_vfs;
3588
3589 devlink = priv_to_devlink(esw->dev);
3590 devl_lock(devlink);
3591
3592 /* Stale work from one or more mode changes ago. Bail out. */
3593 if (work_gen != atomic_read(&esw->esw_funcs.generation))
3594 goto unlock;
3595
3596 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3597 host_params_context.host_num_of_vfs);
3598 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3599 host_params_context.host_pf_disabled);
3600
3601 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3602 goto unlock;
3603
3604 /* Number of VFs can only change from "0 to x" or "x to 0". */
3605 if (esw->esw_funcs.num_vfs > 0) {
3606 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3607 } else {
3608 int err;
3609
3610 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3611 MLX5_VPORT_UC_ADDR_CHANGE);
3612 if (err) {
3613 devl_unlock(devlink);
3614 return;
3615 }
3616 }
3617 esw->esw_funcs.num_vfs = new_num_vfs;
3618 unlock:
3619 devl_unlock(devlink);
3620 }
3621
3622 static void esw_functions_changed_event_handler(struct work_struct *work)
3623 {
3624 struct mlx5_host_work *host_work;
3625 struct mlx5_eswitch *esw;
3626 const u32 *out;
3627
3628 host_work = container_of(work, struct mlx5_host_work, work);
3629 esw = host_work->esw;
3630
3631 out = mlx5_esw_query_functions(esw->dev);
3632 if (IS_ERR(out))
3633 goto out;
3634
3635 esw_vfs_changed_event_handler(esw, host_work->work_gen, out);
3636 kvfree(out);
3637 out:
3638 kfree(host_work);
3639 }
3640
3641 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3642 {
3643 struct mlx5_esw_functions *esw_funcs;
3644 struct mlx5_host_work *host_work;
3645 struct mlx5_eswitch *esw;
3646
3647 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3648 if (!host_work)
3649 return NOTIFY_DONE;
3650
3651 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3652 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3653
3654 host_work->esw = esw;
3655 host_work->work_gen = atomic_read(&esw_funcs->generation);
3656
3657 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3658 queue_work(esw->work_queue, &host_work->work);
3659
3660 return NOTIFY_OK;
3661 }
3662
3663 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3664 {
3665 const u32 *query_host_out;
3666
3667 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3668 return 0;
3669
3670 query_host_out = mlx5_esw_query_functions(esw->dev);
3671 if (IS_ERR(query_host_out))
3672 return PTR_ERR(query_host_out);
3673
3674 /* Mark a non-local controller with a non-zero controller number. */
3675 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3676 host_params_context.host_number);
3677 kvfree(query_host_out);
3678 return 0;
3679 }
3680
3681 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3682 {
3683 /* Local controller is always valid */
3684 if (controller == 0)
3685 return true;
3686
3687 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3688 return false;
3689
3690 /* The external host number starts from zero in the device */
3691 return (controller == esw->offloads.host_number + 1);
3692 }
3693
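/* Enable switchdev offloads: RoCE, vport match metadata, the reg_c0
 * object mapping pool and the steering tables, and finally load the
 * uplink and PF/VF vport representors.
 */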
3694 int esw_offloads_enable(struct mlx5_eswitch *esw)
3695 {
3696 u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
3697 struct mapping_ctx *reg_c0_obj_pool;
3698 struct mlx5_vport *vport;
3699 unsigned long i;
3700 u8 id_len;
3701 int err;
3702
3703 mutex_init(&esw->offloads.termtbl_mutex);
3704 mlx5_esw_adjacent_vhcas_setup(esw);
3705
3706 err = mlx5_rdma_enable_roce(esw->dev);
3707 if (err)
3708 goto err_roce;
3709
3710 err = mlx5_esw_host_number_init(esw);
3711 if (err)
3712 goto err_metadata;
3713
3714 err = esw_offloads_metadata_init(esw);
3715 if (err)
3716 goto err_metadata;
3717
3718 err = esw_set_passing_vport_metadata(esw, true);
3719 if (err)
3720 goto err_vport_metadata;
3721
3722 mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
3723
3724 reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len,
3725 MAPPING_TYPE_CHAIN,
3726 sizeof(struct mlx5_mapped_obj),
3727 ESW_REG_C0_USER_DATA_METADATA_MASK,
3728 true);
3729
3730 if (IS_ERR(reg_c0_obj_pool)) {
3731 err = PTR_ERR(reg_c0_obj_pool);
3732 goto err_pool;
3733 }
3734 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3735
3736 err = esw_offloads_steering_init(esw);
3737 if (err)
3738 goto err_steering_init;
3739
3740 if (esw->offloads_inactive)
3741 mlx5_esw_fdb_inactive(esw);
3742 else
3743 mlx5_esw_fdb_active(esw);
3744
3745 /* Representor will control the vport link state */
3746 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3747 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3748 if (mlx5_core_ec_sriov_enabled(esw->dev))
3749 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
3750 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3751
3752 /* Uplink vport rep must load first. */
3753 err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3754 if (err)
3755 goto err_uplink;
3756
3757 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3758 if (err)
3759 goto err_vports;
3760
3761 return 0;
3762
3763 err_vports:
3764 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3765 err_uplink:
3766 esw_offloads_steering_cleanup(esw);
3767 err_steering_init:
3768 mapping_destroy(reg_c0_obj_pool);
3769 err_pool:
3770 esw_set_passing_vport_metadata(esw, false);
3771 err_vport_metadata:
3772 esw_offloads_metadata_uninit(esw);
3773 err_metadata:
3774 mlx5_rdma_disable_roce(esw->dev);
3775 err_roce:
3776 mlx5_esw_adjacent_vhcas_cleanup(esw);
3777 mutex_destroy(&esw->offloads.termtbl_mutex);
3778 return err;
3779 }
3780
3781 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3782 struct netlink_ext_ack *extack)
3783 {
3784 int err;
3785
3786 esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
3787
3788 /* If changing from switchdev to legacy mode without SR-IOV enabled,
3789  * there is no need to create the legacy FDB.
3790  */
3791 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
3792 return 0;
3793
3794 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3795 if (err)
3796 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3797
3798 return err;
3799 }
3800
3801 void esw_offloads_disable(struct mlx5_eswitch *esw)
3802 {
3803 mlx5_eswitch_disable_pf_vf_vports(esw);
3804 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3805 esw_set_passing_vport_metadata(esw, false);
3806 esw_offloads_steering_cleanup(esw);
3807 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3808 esw_offloads_metadata_uninit(esw);
3809 mlx5_rdma_disable_roce(esw->dev);
3810 mlx5_esw_adjacent_vhcas_cleanup(esw);
3811 /* Must be done after VHCA cleanup to avoid connecting adjacent vports. */
3812 if (esw->offloads_inactive)
3813 mlx5_esw_fdb_active(esw); /* legacy mode always active */
3814 mutex_destroy(&esw->offloads.termtbl_mutex);
3815 }
3816
3817 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3818 {
3819 switch (mode) {
3820 case DEVLINK_ESWITCH_MODE_LEGACY:
3821 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3822 break;
3823 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3824 case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE:
3825 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3826 break;
3827 default:
3828 return -EINVAL;
3829 }
3830
3831 return 0;
3832 }
3833
3834 static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode)
3835 {
3836 switch (esw->mode) {
3837 case MLX5_ESWITCH_LEGACY:
3838 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3839 break;
3840 case MLX5_ESWITCH_OFFLOADS:
3841 if (esw->offloads_inactive)
3842 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE;
3843 else
3844 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3845 break;
3846 default:
3847 return -EINVAL;
3848 }
3849
3850 return 0;
3851 }
3852
3853 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3854 {
3855 switch (mode) {
3856 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3857 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3858 break;
3859 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3860 *mlx5_mode = MLX5_INLINE_MODE_L2;
3861 break;
3862 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3863 *mlx5_mode = MLX5_INLINE_MODE_IP;
3864 break;
3865 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3866 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3867 break;
3868 default:
3869 return -EINVAL;
3870 }
3871
3872 return 0;
3873 }
3874
3875 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3876 {
3877 switch (mlx5_mode) {
3878 case MLX5_INLINE_MODE_NONE:
3879 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3880 break;
3881 case MLX5_INLINE_MODE_L2:
3882 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3883 break;
3884 case MLX5_INLINE_MODE_IP:
3885 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3886 break;
3887 case MLX5_INLINE_MODE_TCP_UDP:
3888 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3889 break;
3890 default:
3891 return -EINVAL;
3892 }
3893
3894 return 0;
3895 }
3896
3897 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
3898 {
3899 struct mlx5_eswitch *esw = dev->priv.eswitch;
3900 int err;
3901
3902 if (!mlx5_esw_allowed(esw))
3903 return 0;
3904
3905 /* Take TC into account */
3906 err = mlx5_esw_try_lock(esw);
3907 if (err < 0)
3908 return err;
3909
3910 esw->offloads.num_block_mode++;
3911 mlx5_esw_unlock(esw);
3912 return 0;
3913 }
3914
3915 void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
3916 {
3917 struct mlx5_eswitch *esw = dev->priv.eswitch;
3918
3919 if (!mlx5_esw_allowed(esw))
3920 return;
3921
3922 down_write(&esw->mode_lock);
3923 esw->offloads.num_block_mode--;
3924 up_write(&esw->mode_lock);
3925 }
3926
3927 /* Returns false only when the uplink netdev exists and its netns differs from
3928  * the devlink's netns; true otherwise, so entering switchdev mode is allowed.
3929  */
3930 static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
3931 bool immutable)
3932 {
3933 struct mlx5_core_dev *mdev = devlink_priv(devlink);
3934 struct net_device *netdev;
3935 bool ret;
3936
3937 netdev = mlx5_uplink_netdev_get(mdev);
3938 if (!netdev)
3939 return true;
3940
3941 rtnl_lock();
3942 netdev->netns_immutable = immutable;
3943 ret = net_eq(dev_net(netdev), devlink_net(devlink));
3944 rtnl_unlock();
3945
3946 mlx5_uplink_netdev_put(mdev, netdev);
3947 return ret;
3948 }
3949
3950 /* Returns true when only changing between active and inactive switchdev mode */
3951 static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw,
3952 u16 devlink_mode)
3953 {
3954 /* current mode is not switchdev */
3955 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
3956 return false;
3957
3958 /* new mode is not switchdev */
3959 if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV &&
3960 devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE)
3961 return false;
3962
3963 /* already inactive: no change in current state */
3964 if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE &&
3965 esw->offloads_inactive)
3966 return false;
3967
3968 /* already active: no change in current state */
3969 if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
3970 !esw->offloads_inactive)
3971 return false;
3972
3973 down_write(&esw->mode_lock);
3974 esw->offloads_inactive = !esw->offloads_inactive;
3975 esw->eswitch_operation_in_progress = true;
3976 up_write(&esw->mode_lock);
3977
3978 if (esw->offloads_inactive)
3979 mlx5_esw_fdb_inactive(esw);
3980 else
3981 mlx5_esw_fdb_active(esw);
3982
3983 down_write(&esw->mode_lock);
3984 esw->eswitch_operation_in_progress = false;
3985 up_write(&esw->mode_lock);
3986 return true;
3987 }
3988
3989 #define MLX5_ESW_HOLD_TIMEOUT_MS 7000
3990 #define MLX5_ESW_HOLD_RETRY_DELAY_MS 500
3991
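/* Remove the representor auxiliary devices, retrying for up to
 * MLX5_ESW_HOLD_TIMEOUT_MS if a concurrent mode transition holds the
 * eswitch. If the eswitch cannot be held in time, removal is skipped.
 */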
3992 void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev)
3993 {
3994 unsigned long timeout;
3995 bool hold_esw = true;
3996
3997 /* Wait for any concurrent eswitch mode transition to complete. */
3998 if (!mlx5_esw_hold(dev)) {
3999 timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS);
4000 while (!mlx5_esw_hold(dev)) {
4001 if (!time_before(jiffies, timeout)) {
4002 hold_esw = false;
4003 break;
4004 }
4005 msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS);
4006 }
4007 }
4008 if (hold_esw) {
4009 if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS)
4010 mlx5_core_reps_aux_devs_remove(dev);
4011 mlx5_esw_release(dev);
4012 }
4013 }
4014
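/* devlink callback for changing the eswitch mode. A pure
 * active<->inactive switchdev toggle is fast-pathed; otherwise the
 * operation is serialized against LAG and other eswitch operations
 * before the mode is switched.
 */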
4015 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
4016 struct netlink_ext_ack *extack)
4017 {
4018 u16 cur_mlx5_mode, mlx5_mode = 0;
4019 struct mlx5_eswitch *esw;
4020 int err = 0;
4021
4022 esw = mlx5_devlink_eswitch_get(devlink);
4023 if (IS_ERR(esw))
4024 return PTR_ERR(esw);
4025
4026 if (mlx5_fw_reset_in_progress(esw->dev)) {
4027 NL_SET_ERR_MSG_MOD(extack, "Can't change eswitch mode during firmware reset");
4028 return -EBUSY;
4029 }
4030
4031 if (esw_mode_from_devlink(mode, &mlx5_mode))
4032 return -EINVAL;
4033
4034 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) {
4035 NL_SET_ERR_MSG_MOD(extack,
4036 "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
4037 return -EPERM;
4038 }
4039
4040 /* Avoid try_lock, active/inactive mode change is not restricted */
4041 if (mlx5_devlink_switchdev_active_mode_change(esw, mode))
4042 return 0;
4043
4044 mlx5_lag_disable_change(esw->dev);
4045 err = mlx5_esw_try_lock(esw);
4046 if (err < 0) {
4047 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
4048 goto enable_lag;
4049 }
4050 cur_mlx5_mode = err;
4051 err = 0;
4052
4053 if (cur_mlx5_mode == mlx5_mode)
4054 goto unlock;
4055
4056 if (esw->offloads.num_block_mode) {
4057 NL_SET_ERR_MSG_MOD(extack,
4058 "Can't change eswitch mode when IPsec SA and/or policies are configured");
4059 err = -EOPNOTSUPP;
4060 goto unlock;
4061 }
4062
4063 esw->eswitch_operation_in_progress = true;
4064 up_write(&esw->mode_lock);
4065
4066 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS &&
4067 !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
4068 NL_SET_ERR_MSG_MOD(extack,
4069 "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
4070 err = -EINVAL;
4071 goto skip;
4072 }
4073
4074 if (mlx5_mode == MLX5_ESWITCH_LEGACY)
4075 esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
4076 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS)
4077 esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_SWITCH_LEGACY;
4078 mlx5_eswitch_disable_locked(esw);
4079 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) {
4080 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
4081 NL_SET_ERR_MSG_MOD(extack,
4082 "Can't change mode while devlink traps are active");
4083 err = -EOPNOTSUPP;
4084 goto skip;
4085 }
4086 esw->offloads_inactive =
4087 (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE);
4088 err = esw_offloads_start(esw, extack);
4089 } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) {
4090 err = esw_offloads_stop(esw, extack);
4091 } else {
4092 err = -EINVAL;
4093 }
4094
4095 skip:
4096 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err)
4097 mlx5_devlink_netdev_netns_immutable_set(devlink, false);
4098 down_write(&esw->mode_lock);
4099 esw->eswitch_operation_in_progress = false;
4100 unlock:
4101 mlx5_esw_unlock(esw);
4102 enable_lag:
4103 mlx5_lag_enable_change(esw->dev);
4104 return err;
4105 }
4106
4107 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
4108 {
4109 struct mlx5_eswitch *esw;
4110
4111 esw = mlx5_devlink_eswitch_get(devlink);
4112 if (IS_ERR(esw))
4113 return PTR_ERR(esw);
4114
4115 return esw_mode_to_devlink(esw, mode);
4116 }
4117
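/* Apply the new min-inline mode to all host function vports (and EC VF
 * vports when EC SR-IOV is enabled). On failure, vports that were
 * already modified are reverted to the previous inline mode.
 */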
4118 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
4119 struct netlink_ext_ack *extack)
4120 {
4121 struct mlx5_core_dev *dev = esw->dev;
4122 struct mlx5_vport *vport;
4123 u16 err_vport_num = 0;
4124 unsigned long i;
4125 int err = 0;
4126
4127 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
4128 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
4129 if (err) {
4130 err_vport_num = vport->vport;
4131 NL_SET_ERR_MSG_MOD(extack,
4132 "Failed to set min inline on vport");
4133 goto revert_inline_mode;
4134 }
4135 }
4136 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
4137 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
4138 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
4139 if (err) {
4140 err_vport_num = vport->vport;
4141 NL_SET_ERR_MSG_MOD(extack,
4142 "Failed to set min inline on vport");
4143 goto revert_ec_vf_inline_mode;
4144 }
4145 }
4146 }
4147 return 0;
4148
4149 revert_ec_vf_inline_mode:
4150 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
4151 if (vport->vport == err_vport_num)
4152 break;
4153 mlx5_modify_nic_vport_min_inline(dev,
4154 vport->vport,
4155 esw->offloads.inline_mode);
4156 }
4157 revert_inline_mode:
4158 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
4159 if (vport->vport == err_vport_num)
4160 break;
4161 mlx5_modify_nic_vport_min_inline(dev,
4162 vport->vport,
4163 esw->offloads.inline_mode);
4164 }
4165 return err;
4166 }
4167
4168 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
4169 struct netlink_ext_ack *extack)
4170 {
4171 struct mlx5_core_dev *dev = devlink_priv(devlink);
4172 struct mlx5_eswitch *esw;
4173 u8 mlx5_mode;
4174 int err;
4175
4176 esw = mlx5_devlink_eswitch_get(devlink);
4177 if (IS_ERR(esw))
4178 return PTR_ERR(esw);
4179
4180 down_write(&esw->mode_lock);
4181
4182 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
4183 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
4184 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
4185 err = 0;
4186 goto out;
4187 }
4188
4189 fallthrough;
4190 case MLX5_CAP_INLINE_MODE_L2:
4191 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
4192 err = -EOPNOTSUPP;
4193 goto out;
4194 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
4195 break;
4196 }
4197
4198 if (atomic64_read(&esw->offloads.num_flows) > 0) {
4199 NL_SET_ERR_MSG_MOD(extack,
4200 "Can't set inline mode when flows are configured");
4201 err = -EOPNOTSUPP;
4202 goto out;
4203 }
4204
4205 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
4206 if (err)
4207 goto out;
4208
4209 esw->eswitch_operation_in_progress = true;
4210 up_write(&esw->mode_lock);
4211
4212 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
4213 if (!err)
4214 esw->offloads.inline_mode = mlx5_mode;
4215
4216 down_write(&esw->mode_lock);
4217 esw->eswitch_operation_in_progress = false;
4218 up_write(&esw->mode_lock);
4219 return err;
4220
4221 out:
4222 up_write(&esw->mode_lock);
4223 return err;
4224 }
4225
4226 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
4227 {
4228 struct mlx5_eswitch *esw;
4229
4230 esw = mlx5_devlink_eswitch_get(devlink);
4231 if (IS_ERR(esw))
4232 return PTR_ERR(esw);
4233
4234 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
4235 }
4236
4237 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
4238 {
4239 struct mlx5_eswitch *esw = dev->priv.eswitch;
4240 enum devlink_eswitch_encap_mode encap;
4241 bool allow_tunnel = false;
4242
4243 if (!mlx5_esw_allowed(esw))
4244 return true;
4245
4246 down_write(&esw->mode_lock);
4247 encap = esw->offloads.encap;
4248 if (esw->mode == MLX5_ESWITCH_LEGACY ||
4249 (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
4250 allow_tunnel = true;
4251 esw->offloads.num_block_encap++;
4252 }
4253 up_write(&esw->mode_lock);
4254
4255 return allow_tunnel;
4256 }
4257
4258 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
4259 {
4260 struct mlx5_eswitch *esw = dev->priv.eswitch;
4261
4262 if (!mlx5_esw_allowed(esw))
4263 return;
4264
4265 down_write(&esw->mode_lock);
4266 esw->offloads.num_block_encap--;
4267 up_write(&esw->mode_lock);
4268 }
4269
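/* devlink callback for changing the encap offload mode. In switchdev
 * mode this requires re-creating the FDB tables, so the change is
 * refused while flows or IPsec configurations exist.
 */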
4270 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
4271 enum devlink_eswitch_encap_mode encap,
4272 struct netlink_ext_ack *extack)
4273 {
4274 struct mlx5_core_dev *dev = devlink_priv(devlink);
4275 struct mlx5_eswitch *esw;
4276 int err = 0;
4277
4278 esw = mlx5_devlink_eswitch_get(devlink);
4279 if (IS_ERR(esw))
4280 return PTR_ERR(esw);
4281
4282 down_write(&esw->mode_lock);
4283
4284 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
4285 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
4286 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
4287 err = -EOPNOTSUPP;
4288 goto unlock;
4289 }
4290
4291 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
4292 err = -EOPNOTSUPP;
4293 goto unlock;
4294 }
4295
4296 if (esw->mode == MLX5_ESWITCH_LEGACY) {
4297 esw->offloads.encap = encap;
4298 goto unlock;
4299 }
4300
4301 if (esw->offloads.encap == encap)
4302 goto unlock;
4303
4304 if (atomic64_read(&esw->offloads.num_flows) > 0) {
4305 NL_SET_ERR_MSG_MOD(extack,
4306 "Can't set encapsulation when flows are configured");
4307 err = -EOPNOTSUPP;
4308 goto unlock;
4309 }
4310
4311 if (esw->offloads.num_block_encap) {
4312 NL_SET_ERR_MSG_MOD(extack,
4313 "Can't set encapsulation when IPsec SA and/or policies are configured");
4314 err = -EOPNOTSUPP;
4315 goto unlock;
4316 }
4317
4318 esw->eswitch_operation_in_progress = true;
4319 up_write(&esw->mode_lock);
4320
4321 esw_destroy_offloads_fdb_tables(esw);
4322
4323 esw->offloads.encap = encap;
4324
4325 err = esw_create_offloads_fdb_tables(esw);
4326
4327 if (err) {
4328 NL_SET_ERR_MSG_MOD(extack,
4329 "Failed re-creating fast FDB table");
4330 esw->offloads.encap = !encap;
4331 (void)esw_create_offloads_fdb_tables(esw);
4332 }
4333
4334 down_write(&esw->mode_lock);
4335 esw->eswitch_operation_in_progress = false;
4336
4337 unlock:
4338 up_write(&esw->mode_lock);
4339 return err;
4340 }
4341
4342 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
4343 enum devlink_eswitch_encap_mode *encap)
4344 {
4345 struct mlx5_eswitch *esw;
4346
4347 esw = mlx5_devlink_eswitch_get(devlink);
4348 if (IS_ERR(esw))
4349 return PTR_ERR(esw);
4350
4351 *encap = esw->offloads.encap;
4352 return 0;
4353 }
4354
4355 static bool
4356 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
4357 {
4358 /* Currently, only an ECPF-based device has a representor for the host PF. */
4359 if (vport_num == MLX5_VPORT_PF &&
4360 (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
4361 !mlx5_esw_host_functions_enabled(esw->dev)))
4362 return false;
4363
4364 if (vport_num == MLX5_VPORT_ECPF &&
4365 !mlx5_ecpf_vport_exists(esw->dev))
4366 return false;
4367
4368 return true;
4369 }
4370
4371 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
4372 const struct mlx5_eswitch_rep_ops *ops,
4373 u8 rep_type)
4374 {
4375 struct mlx5_eswitch_rep_data *rep_data;
4376 struct mlx5_eswitch_rep *rep;
4377 unsigned long i;
4378
4379 esw->offloads.rep_ops[rep_type] = ops;
4380 mlx5_esw_for_each_rep(esw, i, rep) {
4381 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
4382 rep->esw = esw;
4383 rep_data = &rep->rep_data[rep_type];
4384 atomic_set(&rep_data->state, REP_REGISTERED);
4385 }
4386 }
4387 }
4388 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
4389
4390 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
4391 {
4392 struct mlx5_eswitch_rep *rep;
4393 unsigned long i;
4394
4395 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
4396 __unload_reps_all_vport(esw, rep_type);
4397
4398 mlx5_esw_for_each_rep(esw, i, rep)
4399 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
4400 }
4401 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
4402
4403 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
4404 {
4405 struct mlx5_eswitch_rep *rep;
4406
4407 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
4408 return rep->rep_data[rep_type].priv;
4409 }
4410
4411 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
4412 u16 vport,
4413 u8 rep_type)
4414 {
4415 struct mlx5_eswitch_rep *rep;
4416
4417 rep = mlx5_eswitch_get_rep(esw, vport);
4418
4419 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
4420 esw->offloads.rep_ops[rep_type]->get_proto_dev)
4421 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
4422 return NULL;
4423 }
4424 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
4425
4426 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
4427 {
4428 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
4429 }
4430 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
4431
4432 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
4433 u16 vport)
4434 {
4435 return mlx5_eswitch_get_rep(esw, vport);
4436 }
4437 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
4438
4439 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
4440 {
4441 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
4442 }
4443 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
4444
4445 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
4446 {
4447 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
4448 }
4449 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
4450
4451 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
4452 u16 vport_num)
4453 {
4454 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4455
4456 if (WARN_ON_ONCE(IS_ERR(vport)))
4457 return 0;
4458
4459 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
4460 }
4461 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
4462
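/* Record the vport's vhca_id to vport_num mapping, querying the
 * firmware for the vhca_id if it was not set when the vport was
 * enabled.
 */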
4463 int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
4464 struct mlx5_vport *vport)
4465 {
4466 u16 *old_entry, *vhca_map_entry, vhca_id;
4467
4468 if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport),
4469 "vport %d vhca_id is not set", vport->vport)) {
4470 int err;
4471
4472 err = mlx5_vport_get_vhca_id(vport->dev, vport->vport,
4473 &vhca_id);
4474 if (err)
4475 return err;
4476 vport->vhca_id = vhca_id;
4477 }
4478
4479 vhca_id = vport->vhca_id;
4480 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
4481 if (!vhca_map_entry)
4482 return -ENOMEM;
4483
4484 *vhca_map_entry = vport->vport;
4485 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
4486 if (xa_is_err(old_entry)) {
4487 kfree(vhca_map_entry);
4488 return xa_err(old_entry);
4489 }
4490 kfree(old_entry);
4491 return 0;
4492 }
4493
4494 void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
4495 struct mlx5_vport *vport)
4496 {
4497 u16 *vhca_map_entry;
4498
4499 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id);
4500 kfree(vhca_map_entry);
4501 }
4502
4503 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
4504 {
4505 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
4506
4507 if (!res)
4508 return -ENOENT;
4509
4510 *vport_num = *res;
4511 return 0;
4512 }
4513
4514 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
4515 u16 vport_num)
4516 {
4517 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4518
4519 if (WARN_ON_ONCE(IS_ERR(vport)))
4520 return 0;
4521
4522 return vport->metadata;
4523 }
4524 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
4525
4526 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
4527 u8 *hw_addr, int *hw_addr_len,
4528 struct netlink_ext_ack *extack)
4529 {
4530 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4531 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4532
4533 mutex_lock(&esw->state_lock);
4534
4535 mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
4536 vport->info.mac);
4537 ether_addr_copy(hw_addr, vport->info.mac);
4538 *hw_addr_len = ETH_ALEN;
4539 mutex_unlock(&esw->state_lock);
4540 return 0;
4541 }
4542
4543 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
4544 const u8 *hw_addr, int hw_addr_len,
4545 struct netlink_ext_ack *extack)
4546 {
4547 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4548 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4549
4550 return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
4551 }
4552
4553 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
4554 struct netlink_ext_ack *extack)
4555 {
4556 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4557 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4558
4559 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4560 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4561 return -EOPNOTSUPP;
4562 }
4563
4564 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4565 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4566 return -EOPNOTSUPP;
4567 }
4568
4569 mutex_lock(&esw->state_lock);
4570 *is_enabled = vport->info.mig_enabled;
4571 mutex_unlock(&esw->state_lock);
4572 return 0;
4573 }
4574
4575 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
4576 struct netlink_ext_ack *extack)
4577 {
4578 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4579 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4580 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4581 void *query_ctx;
4582 void *hca_caps;
4583 int err;
4584
4585 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4586 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4587 return -EOPNOTSUPP;
4588 }
4589
4590 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4591 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4592 return -EOPNOTSUPP;
4593 }
4594
4595 mutex_lock(&esw->state_lock);
4596
4597 if (vport->info.mig_enabled == enable) {
4598 err = 0;
4599 goto out;
4600 }
4601
4602 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4603 if (!query_ctx) {
4604 err = -ENOMEM;
4605 goto out;
4606 }
4607
4608 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4609 MLX5_CAP_GENERAL_2);
4610 if (err) {
4611 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4612 goto out_free;
4613 }
4614
4615 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4616 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
4617
4618 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4619 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
4620 if (err) {
4621 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
4622 goto out_free;
4623 }
4624
4625 vport->info.mig_enabled = enable;
4626
4627 out_free:
4628 kfree(query_ctx);
4629 out:
4630 mutex_unlock(&esw->state_lock);
4631 return err;
4632 }
4633
4634 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
4635 struct netlink_ext_ack *extack)
4636 {
4637 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4638 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4639
4640 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4641 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4642 return -EOPNOTSUPP;
4643 }
4644
4645 mutex_lock(&esw->state_lock);
4646 *is_enabled = vport->info.roce_enabled;
4647 mutex_unlock(&esw->state_lock);
4648 return 0;
4649 }
4650
4651 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
4652 struct netlink_ext_ack *extack)
4653 {
4654 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4655 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4656 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4657 u16 vport_num = vport->vport;
4658 void *query_ctx;
4659 void *hca_caps;
4660 int err;
4661
4662 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4663 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4664 return -EOPNOTSUPP;
4665 }
4666
4667 mutex_lock(&esw->state_lock);
4668
4669 if (vport->info.roce_enabled == enable) {
4670 err = 0;
4671 goto out;
4672 }
4673
4674 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4675 if (!query_ctx) {
4676 err = -ENOMEM;
4677 goto out;
4678 }
4679
4680 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4681 MLX5_CAP_GENERAL);
4682 if (err) {
4683 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4684 goto out_free;
4685 }
4686
4687 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4688 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
4689
4690 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4691 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
4692 if (err) {
4693 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
4694 goto out_free;
4695 }
4696
4697 vport->info.roce_enabled = enable;
4698
4699 out_free:
4700 kfree(query_ctx);
4701 out:
4702 mutex_unlock(&esw->state_lock);
4703 return err;
4704 }
4705
4706 int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
4707 enum devlink_port_fn_state *state,
4708 enum devlink_port_fn_opstate *opstate,
4709 struct netlink_ext_ack *extack)
4710 {
4711 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4712 const u32 *query_out;
4713 bool pf_disabled;
4714
4715 if (vport->vport != MLX5_VPORT_PF) {
4716 NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF");
4717 return -EOPNOTSUPP;
4718 }
4719
4720 *state = vport->pf_activated ?
4721 DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE;
4722
4723 query_out = mlx5_esw_query_functions(vport->dev);
4724 if (IS_ERR(query_out))
4725 return PTR_ERR(query_out);
4726
4727 pf_disabled = MLX5_GET(query_esw_functions_out, query_out,
4728 host_params_context.host_pf_disabled);
4729
4730 *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED :
4731 DEVLINK_PORT_FN_OPSTATE_ATTACHED;
4732
4733 kvfree(query_out);
4734 return 0;
4735 }
4736
4737 int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
4738 enum devlink_port_fn_state state,
4739 struct netlink_ext_ack *extack)
4740 {
4741 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4742 struct mlx5_core_dev *dev;
4743
4744 if (vport->vport != MLX5_VPORT_PF) {
4745 NL_SET_ERR_MSG_MOD(extack, "State set is not supported for VF");
4746 return -EOPNOTSUPP;
4747 }
4748
4749 dev = vport->dev;
4750
4751 switch (state) {
4752 case DEVLINK_PORT_FN_STATE_ACTIVE:
4753 return mlx5_esw_host_pf_enable_hca(dev);
4754 case DEVLINK_PORT_FN_STATE_INACTIVE:
4755 return mlx5_esw_host_pf_disable_hca(dev);
4756 default:
4757 return -EOPNOTSUPP;
4758 }
4759 }
4760
4761 int
4762 mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
4763 struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
4764 {
4765 struct mlx5_flow_destination new_dest = {};
4766 struct mlx5_flow_destination old_dest = {};
4767
4768 if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
4769 return 0;
4770
4771 esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4772 esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4773
4774 return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
4775 }
4776
4777 #ifdef CONFIG_XFRM_OFFLOAD
4778 int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
4779 struct netlink_ext_ack *extack)
4780 {
4781 struct mlx5_eswitch *esw;
4782 struct mlx5_vport *vport;
4783 int err = 0;
4784
4785 esw = mlx5_devlink_eswitch_get(port->devlink);
4786 if (IS_ERR(esw))
4787 return PTR_ERR(esw);
4788
4789 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4790 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
4791 return -EOPNOTSUPP;
4792 }
4793
4794 vport = mlx5_devlink_port_vport_get(port);
4795
4796 mutex_lock(&esw->state_lock);
4797 if (!vport->enabled) {
4798 err = -EOPNOTSUPP;
4799 goto unlock;
4800 }
4801
4802 *is_enabled = vport->info.ipsec_crypto_enabled;
4803 unlock:
4804 mutex_unlock(&esw->state_lock);
4805 return err;
4806 }
4807
4808 int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
4809 struct netlink_ext_ack *extack)
4810 {
4811 struct mlx5_eswitch *esw;
4812 struct mlx5_vport *vport;
4813 u16 vport_num;
4814 int err;
4815
4816 esw = mlx5_devlink_eswitch_get(port->devlink);
4817 if (IS_ERR(esw))
4818 return PTR_ERR(esw);
4819
4820 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4821 err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
4822 if (err) {
4823 NL_SET_ERR_MSG_MOD(extack,
4824 "Device doesn't support IPsec crypto");
4825 return err;
4826 }
4827
4828 vport = mlx5_devlink_port_vport_get(port);
4829
4830 mutex_lock(&esw->state_lock);
4831 if (!vport->enabled) {
4832 err = -EOPNOTSUPP;
4833 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4834 goto unlock;
4835 }
4836
4837 if (vport->info.ipsec_crypto_enabled == enable)
4838 goto unlock;
4839
4840 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4841 err = -EBUSY;
4842 goto unlock;
4843 }
4844
4845 err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
4846 if (err) {
4847 NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
4848 goto unlock;
4849 }
4850
4851 vport->info.ipsec_crypto_enabled = enable;
4852 if (enable)
4853 esw->enabled_ipsec_vf_count++;
4854 else
4855 esw->enabled_ipsec_vf_count--;
4856 unlock:
4857 mutex_unlock(&esw->state_lock);
4858 return err;
4859 }
4860
4861 int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
4862 struct netlink_ext_ack *extack)
4863 {
4864 struct mlx5_eswitch *esw;
4865 struct mlx5_vport *vport;
4866 int err = 0;
4867
4868 esw = mlx5_devlink_eswitch_get(port->devlink);
4869 if (IS_ERR(esw))
4870 return PTR_ERR(esw);
4871
4872 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4873 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
4874 return -EOPNOTSUPP;
4875 }
4876
4877 vport = mlx5_devlink_port_vport_get(port);
4878
4879 mutex_lock(&esw->state_lock);
4880 if (!vport->enabled) {
4881 err = -EOPNOTSUPP;
4882 goto unlock;
4883 }
4884
4885 *is_enabled = vport->info.ipsec_packet_enabled;
4886 unlock:
4887 mutex_unlock(&esw->state_lock);
4888 return err;
4889 }
4890
4891 int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
4892 bool enable,
4893 struct netlink_ext_ack *extack)
4894 {
4895 struct mlx5_eswitch *esw;
4896 struct mlx5_vport *vport;
4897 u16 vport_num;
4898 int err;
4899
4900 esw = mlx5_devlink_eswitch_get(port->devlink);
4901 if (IS_ERR(esw))
4902 return PTR_ERR(esw);
4903
4904 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4905 err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
4906 if (err) {
4907 NL_SET_ERR_MSG_MOD(extack,
4908 "Device doesn't support IPsec packet mode");
4909 return err;
4910 }
4911
4912 vport = mlx5_devlink_port_vport_get(port);
4913 mutex_lock(&esw->state_lock);
4914 if (!vport->enabled) {
4915 err = -EOPNOTSUPP;
4916 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4917 goto unlock;
4918 }
4919
4920 if (vport->info.ipsec_packet_enabled == enable)
4921 goto unlock;
4922
4923 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4924 err = -EBUSY;
4925 goto unlock;
4926 }
4927
4928 err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
4929 if (err) {
4930 NL_SET_ERR_MSG_MOD(extack,
4931 "Failed to set IPsec packet mode");
4932 goto unlock;
4933 }
4934
4935 vport->info.ipsec_packet_enabled = enable;
4936 if (enable)
4937 esw->enabled_ipsec_vf_count++;
4938 else
4939 esw->enabled_ipsec_vf_count--;
4940 unlock:
4941 mutex_unlock(&esw->state_lock);
4942 return err;
4943 }
4944 #endif /* CONFIG_XFRM_OFFLOAD */
4945
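/* Get the max number of I/O EQs of the function behind this port:
 * max_num_eqs_24b minus the MLX5_ESW_MAX_CTRL_EQS reserved for control.
 */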
4946 int
4947 mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
4948 struct netlink_ext_ack *extack)
4949 {
4950 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4951 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4952 u16 vport_num = vport->vport;
4953 struct mlx5_eswitch *esw;
4954 void *query_ctx;
4955 void *hca_caps;
4956 u32 max_eqs;
4957 int err;
4958
4959 esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4960 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4961 NL_SET_ERR_MSG_MOD(extack,
4962 "Device doesn't support VHCA management");
4963 return -EOPNOTSUPP;
4964 }
4965
4966 if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
4967 NL_SET_ERR_MSG_MOD(extack,
4968 "Device doesn't support getting the max number of EQs");
4969 return -EOPNOTSUPP;
4970 }
4971
4972 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4973 if (!query_ctx)
4974 return -ENOMEM;
4975
4976 mutex_lock(&esw->state_lock);
4977 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4978 MLX5_CAP_GENERAL_2);
4979 if (err) {
4980 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4981 goto out;
4982 }
4983
4984 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4985 max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b);
4986 if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
4987 *max_io_eqs = 0;
4988 else
4989 *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
4990 out:
4991 mutex_unlock(&esw->state_lock);
4992 kfree(query_ctx);
4993 return err;
4994 }
4995
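/* Set the max number of I/O EQs of the function behind this port.
 * MLX5_ESW_MAX_CTRL_EQS control EQs are added on top of the requested
 * value before programming max_num_eqs_24b.
 */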
4996 int
4997 mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
4998 struct netlink_ext_ack *extack)
4999 {
5000 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
5001 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
5002 u16 vport_num = vport->vport;
5003 struct mlx5_eswitch *esw;
5004 void *query_ctx;
5005 void *hca_caps;
5006 u16 max_eqs;
5007 int err;
5008
5009 esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
5010 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
5011 NL_SET_ERR_MSG_MOD(extack,
5012 "Device doesn't support VHCA management");
5013 return -EOPNOTSUPP;
5014 }
5015
5016 if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
5017 NL_SET_ERR_MSG_MOD(extack,
5018 "Device doesn't support changing the max number of EQs");
5019 return -EOPNOTSUPP;
5020 }
5021
5022 if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
5023 NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
5024 return -EINVAL;
5025 }
5026
5027 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
5028 if (!query_ctx)
5029 return -ENOMEM;
5030
5031 mutex_lock(&esw->state_lock);
5032 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
5033 MLX5_CAP_GENERAL_2);
5034 if (err) {
5035 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
5036 goto out;
5037 }
5038
5039 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
5040 MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
5041
5042 if (mlx5_esw_is_sf_vport(esw, vport_num))
5043 MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1);
5044
5045 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
5046 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
5047 if (err)
5048 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
5049 vport->max_eqs_set = true;
5050 out:
5051 mutex_unlock(&esw->state_lock);
5052 kfree(query_ctx);
5053 return err;
5054 }
5055
5056 int
5057 mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
5058 struct netlink_ext_ack *extack)
5059 {
5060 return mlx5_devlink_port_fn_max_io_eqs_set(port,
5061 MLX5_ESW_DEFAULT_SF_COMP_EQS,
5062 extack);
5063 }
5064