// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

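/* Remove the egress forward-to-vport rule, if one was installed. */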
static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
{
	if (!vport->egress.offloads.fwd_rule)
		return;

	mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
	vport->egress.offloads.fwd_rule = NULL;
}

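/* Remove a single bounce rule from the vport's egress ACL and drop its
 * entry from the bounce_rules xarray.
 */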
void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index)
{
	struct mlx5_flow_handle *bounce_rule =
		xa_load(&vport->egress.offloads.bounce_rules, rule_index);

	if (!bounce_rule)
		return;

	mlx5_del_flow_rules(bounce_rule);
	xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
}

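/* Flush every bounce rule still tracked in the bounce_rules xarray. */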
static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *bounce_rule;
	unsigned long i;

	xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) {
		mlx5_del_flow_rules(bounce_rule);
		xa_erase(&vport->egress.offloads.bounce_rules, i);
	}
}

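/* Install a catch-all egress rule that forwards all of this vport's
 * traffic to fwd_dest. Any previously installed forward rule is replaced.
 */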
static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport,
						struct mlx5_flow_destination *fwd_dest)
{
	struct mlx5_flow_act flow_act = {};
	int err = 0;

	esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
		  vport->vport, fwd_dest->vport.num);

	/* Delete the old egress forward-to-vport rule if any */
	esw_acl_egress_ofld_fwd2vport_destroy(vport);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	vport->egress.offloads.fwd_rule =
		mlx5_add_flow_rules(vport->egress.acl, NULL,
				    &flow_act, fwd_dest, 1);
	if (IS_ERR(vport->egress.offloads.fwd_rule)) {
		err = PTR_ERR(vport->egress.offloads.fwd_rule);
		esw_warn(esw->dev,
			 "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
			 vport->vport, err);
		vport->egress.offloads.fwd_rule = NULL;
	}

	return err;
}

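/* Populate the egress ACL rules: an optional prio tag VLAN pop rule and,
 * when fwd_dest is given, the forward-to-vport rule used for bonding.
 */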
static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport,
					    struct mlx5_flow_destination *fwd_dest)
{
	int err = 0;
	int action;

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
		/* In prio tag mode there is a single FTE:
		 * 1) prio tag packets - pop the prio tag VLAN, then allow.
		 * Unmatched traffic is allowed by default.
		 */
		esw_debug(esw->dev,
			  "vport[%d] configure prio tag egress rules\n", vport->vport);

		action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;

		/* prio tag vlan rule - pop it so vport receives untagged packets */
		err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
		if (err)
			goto prio_err;
	}

	if (fwd_dest) {
		err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
		if (err)
			goto fwd_err;
	}

	return 0;

fwd_err:
	esw_acl_egress_vlan_destroy(vport);
prio_err:
	return err;
}

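/* Tear down all egress ACL rules: VLAN, forward-to-vport and bounce rules. */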
static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
	esw_acl_egress_vlan_destroy(vport);
	esw_acl_egress_ofld_fwd2vport_destroy(vport);
	esw_acl_egress_ofld_bounce_rules_destroy(vport);
}

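/* Create the flow groups backing the egress ACL table: a VLAN group when
 * prio tag mode is required, and a single-FTE forward-to-vport group when
 * egress forwarding is supported.
 */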
static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fwd_grp;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
		ret = esw_acl_egress_vlan_grp_create(esw, vport);
		if (ret)
			return ret;

		flow_index++;
	}

	if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
		goto out;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		ret = -ENOMEM;
		goto fwd_grp_err;
	}

	/* This group holds 1 FTE to forward all packets to the other vport
	 * when vport bonding is supported.
	 */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
	fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
	if (IS_ERR(fwd_grp)) {
		ret = PTR_ERR(fwd_grp);
		esw_warn(esw->dev,
			 "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
			 vport->vport, ret);
		kvfree(flow_group_in);
		goto fwd_grp_err;
	}
	vport->egress.offloads.fwd_grp = fwd_grp;
	kvfree(flow_group_in);
	return 0;

fwd_grp_err:
	esw_acl_egress_vlan_grp_destroy(vport);
out:
	return ret;
}

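/* Destroy the forward-to-vport, bounce and VLAN flow groups, if present. */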
static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
		vport->egress.offloads.fwd_grp = NULL;
	}

	if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
		vport->egress.offloads.bounce_grp = NULL;
	}

	esw_acl_egress_vlan_grp_destroy(vport);
}

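/* Egress ACLs are only needed for VF and SF vports. */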
static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}

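/* Create the egress ACL table, its flow groups and the default rules for a
 * vport. Returns 0 when no egress ACL is needed on this device or vport.
 */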
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int table_size = 0;
	int err;

	if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	if (!esw_acl_egress_needed(esw, vport->vport))
		return 0;

	esw_acl_egress_ofld_rules_destroy(vport);

	if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
		table_size++;
	if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
		table_size++;
	vport->egress.acl = esw_acl_table_create(esw, vport,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
	if (IS_ERR(vport->egress.acl)) {
		err = PTR_ERR(vport->egress.acl);
		vport->egress.acl = NULL;
		return err;
	}
	vport->egress.type = VPORT_EGRESS_ACL_TYPE_DEFAULT;

	err = esw_acl_egress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);

	err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_egress_ofld_groups_destroy(vport);
group_err:
	esw_acl_egress_table_destroy(vport);
	return err;
}

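/* Undo esw_acl_egress_ofld_setup(): destroy rules, groups and the table. */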
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_rules_destroy(vport);
	esw_acl_egress_ofld_groups_destroy(vport);
	esw_acl_egress_table_destroy(vport);
}

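/* Bond two vports: recreate the active vport's rules without forwarding,
 * and point the passive vport's egress traffic at the active vport.
 */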
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num)
{
	struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
	struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
	struct mlx5_flow_destination fwd_dest = {};

	if (IS_ERR(active_vport))
		return PTR_ERR(active_vport);
	if (IS_ERR(passive_vport))
		return PTR_ERR(passive_vport);

	/* Clean up and recreate the active vport's rules WITHOUT fwd2vport */
	esw_acl_egress_ofld_rules_destroy(active_vport);
	esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);

	/* Clean up and recreate all of the passive vport's rules, plus a
	 * fwd2vport rule forwarding to the active vport.
	 */
	esw_acl_egress_ofld_rules_destroy(passive_vport);
	fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	fwd_dest.vport.num = active_vport_num;
	fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;

	return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
}

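/* Restore a vport's default egress rules, dropping any forward rule. */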
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	esw_acl_egress_ofld_rules_destroy(vport);
	return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
}
275