// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

static int
acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

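/* Prio-tag handling is only required for VF vports, and only when the
 * device sets the prio_tag_required capability.
 */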
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

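/* On devices that require prio tags, push an empty 802.1Q tag (VID 0,
 * PCP 0) onto untagged packets; tagged traffic is left alone and
 * allowed by default.
 */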
static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 * required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

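/* Stamp each ingress packet with the vport's source-port metadata in
 * reg_c_0, so that later FDB stages can match on the packet's origin.
 */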
static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.modify_metadata_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}

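/* Install a catch-all drop rule in the vport's dedicated drop group,
 * creating the ingress ACL table on demand if it does not exist yet.
 */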
static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	bool created = false;
	int err = 0;

	if (!vport->ingress.acl) {
		err = acl_ingress_ofld_setup(esw, vport);
		if (err)
			return err;
		created = true;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_act.fg = vport->ingress.offloads.drop_grp;
	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_out;
	}

	vport->ingress.offloads.drop_rule = flow_rule;

	return 0;
err_out:
	/* Only destroy the ingress ACL created in this function. */
	if (created)
		esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.drop_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
	vport->ingress.offloads.drop_rule = NULL;
}

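/* Create the ingress rules that the current eswitch configuration
 * requires: the metadata-stamping rule when metadata matching is
 * enabled, and the prio-tag push rule when prio-tag mode is enabled.
 */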
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_acl_ingress_mod_metadata_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress modify metadata, err(%d)\n",
				 vport->vport, err);
			return err;
		}
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_acl_ingress_prio_tag_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress prio tag rule, err(%d)\n",
				 vport->vport, err);
			goto prio_tag_err;
		}
	}

	return 0;

prio_tag_err:
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	return err;
}

static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	esw_acl_ingress_allow_rule_destroy(vport);
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}

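/* The ingress ACL holds up to three single-FTE groups, in match order:
 * a drop group (uplink vport only), an untagged-match group for
 * prio-tag mode, and a match-all group for metadata stamping.
 */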
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (vport->vport == MLX5_VPORT_UPLINK) {
		/* This group can hold an FTE to drop all traffic.
		 * Needed in case LAG is enabled.
		 */
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto drop_err;
		}
		vport->ingress.offloads.drop_grp = g;
		flow_index++;
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds an FTE to match untagged packets when
		 * prio_tag is enabled.
		 */
		memset(flow_group_in, 0, inlen);
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match to add metadata for
		 * tagged packets if prio-tag is enabled, or for all traffic
		 * in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
drop_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}

	if (vport->ingress.offloads.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
}

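/* Size the ingress ACL table from the features enabled for this vport,
 * then create its flow groups and rules; on failure, the groups and
 * table are torn down in reverse order.
 */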
static int
acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	esw_acl_ingress_allow_rule_destroy(vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (vport->vport == MLX5_VPORT_UPLINK)
		num_ftes++;
	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	vport->ingress.acl = esw_acl_table_create(esw, vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
	if (IS_ERR(vport->ingress.acl)) {
		err = PTR_ERR(vport->ingress.acl);
		vport->ingress.acl = NULL;
		return err;
	}

	err = esw_acl_ingress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_ingress_ofld_groups_destroy(vport);
group_err:
	esw_acl_ingress_table_destroy(vport);
	return err;
}

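/* Public entry point: skip ACL creation entirely when neither metadata
 * matching nor prio-tag mode needs ingress rules for this vport.
 */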
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
		return 0;

	return acl_ingress_ofld_setup(esw, vport);
}

void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	esw_acl_ingress_ofld_rules_destroy(esw, vport);
	esw_acl_ingress_ofld_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);
}

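/* Replace the metadata value stamped on this vport's ingress traffic.
 * Passing zero restores the vport's default metadata; on failure the
 * default is restored as well.
 */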
/* Caller must hold rtnl_lock */
int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
					       u32 metadata)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int err;

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	esw_acl_ingress_ofld_rules_destroy(esw, vport);

	vport->metadata = metadata ? metadata : vport->default_metadata;

	/* Recreate ingress acl rules with vport->metadata */
	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto out;

	return 0;

out:
	vport->metadata = vport->default_metadata;
	return err;
}

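/* Resolve the vport from its number and install the catch-all drop
 * rule on its ingress ACL, e.g. while LAG is active (see the drop
 * group comment above).
 */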
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport)) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	return esw_acl_ingress_src_port_drop_create(esw, vport);
}

void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return;
	}

	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}