/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_ipsec.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/pfkeyv2.h>
#include <netipsec/key_var.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <dev/mlx5/mlx5_core/fs_chains.h>

/*
 * TX tables are organized differently for Ethernet and for RoCE:
 *
 *                       +=========+
 *       Ethernet Tx     | SA KSPI | match
 * --------------------->|Flowtable|----->+         +
 *                       |         |\     |        / \
 *                       +=========+ |    |       /   \         +=========+     +=========+
 *                              miss |    |      /     \        |  Status |     |         |
 *                      DROP<--------+    |---->|Encrypt|------>|Flowtable|---->|  TX NS  |
 *                                        |      \     /        |         |     |         |
 *                                        |       \   /         +=========+     +=========+
 *       +=========+      +=========+     |        \ /               |
 *  RoCE |  Policy | match|SA ReqId |match|         +                |
 *  Tx   |Flowtable|----->|Flowtable|---->+                          |
 *  ---->|IP header|      |ReqId+IP |                                |
 *       |         |      | header  |--------------------------------+
 *       +=========+      +=========+         miss                   |
 *            |                                                      |
 *            |                   miss                               |
 *            +-------------------------------------------------------
 *
 *                                                                                  +=========+
 *                                                                                  |   RDMA  |
 *                                                                                  |Flowtable|
 *                                                                                  |         |
 * Rx Tables and rules:                                                             +=========+
 *                                             +                                        /
 *       +=========+      +=========+         / \         +=========+      +=========+ /match
 *       |  Policy |      |   SA    |        /   \        |  Status |      |  RoCE   |/
 *  ---->|Flowtable| match|Flowtable| match /     \       |Flowtable|----->|Flowtable|
 *       |IP header|----->|IP header|----->|Decrypt|----->|         |      | RoCE v2 |
 *       |         |      |+ESP+SPI |       \     /       |         |      | UDP port|\
 *       +=========+      +=========+        \   /        +=========+      +=========+ \miss
 *             |               |              \ /                                       \
 *             |               |               +                                      +=========+
 *             |     miss      |          miss                                       | Ethernet|
 *             +--------------->---------------------------------------------------->|  RX NS  |
 *                                                                                   |         |
 *                                                                                   +=========+
 *
 */

#define NUM_IPSEC_FTE BIT(15)
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_ft {
	struct mutex mutex; /* Protect changes to this struct */
	struct mlx5_flow_table *pol;
	struct mlx5_flow_table *sa_kspi;
	struct mlx5_flow_table *sa;
	struct mlx5_flow_table *status;
	u32 refcnt;
};

struct mlx5e_ipsec_tx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_namespace *ns;
};

struct mlx5e_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss kspi_miss;
	struct mlx5e_ipsec_rule status;
	struct mlx5e_ipsec_rule kspi_bypass_rule; /* rule for IPsec bypass */
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5e_ipsec_tx_roce roce;
};

struct mlx5e_ipsec_rx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ipsec_miss roce_miss;

	struct mlx5_flow_table *ft_rdma;
	struct mlx5_flow_namespace *ns_rdma;
};

struct mlx5e_ipsec_rx_ip_type {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_handle *ipv4_rule;
	struct mlx5_flow_handle *ipv6_rule;
	struct mlx5e_ipsec_miss miss;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5e_ipsec_rx_roce roce;
};

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
				     u16 kspi);
static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec);

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	if (encap) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.inner_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.inner_esp_spi, spi);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
	}
}

static void
setup_fte_vid(struct mlx5_flow_spec *spec, u16 vid)
{
	/* virtual lan tag */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
	    vid);
}

static void
clear_fte_vid(struct mlx5_flow_spec *spec)
{
	MLX5_SET(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag, 0);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 0);
	MLX5_SET(fte_match_param, spec->match_criteria,
	    outer_headers.first_vid, 0);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.first_vid, 0);
}

static void
setup_fte_no_vid(struct mlx5_flow_spec *spec)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 0);
}

static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

static struct mlx5_flow_table *ipsec_rx_ft_create(struct mlx5_flow_namespace *ns,
						  int level, int prio,
						  int max_num_groups)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.autogroup.num_reserved_entries = 1;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

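/*
 * Reserve the last FTE of @ft as a catch-all: forward anything that matched
 * nothing else to @dest, or drop it when @dest is NULL.
 */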
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	if (dest)
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	else
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

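/*
 * Write @val into packet metadata via a modify-header action: REG_B for
 * inbound rules (kernel RX namespace), REG_C_0 for outbound rules (egress
 * namespace).
 */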
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case IPSEC_DIR_INBOUND:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		break;
	case IPSEC_DIR_OUTBOUND:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action, data, val);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}

static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     struct mlx5_pkt_reformat_params *reformat_params)
{
	struct udphdr *udphdr;
	size_t bfflen = 16;
	char *reformatbf;
	__be32 spi;
	void *hdr;

	if (attrs->family == AF_INET) {
		if (attrs->encap)
			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
		else
			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
	} else {
		if (attrs->encap)
			reformat_params->type =
			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
		else
			reformat_params->type =
			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
	}

	if (attrs->encap)
		bfflen += sizeof(*udphdr);
	reformatbf = kzalloc(bfflen, GFP_KERNEL);
	if (!reformatbf)
		return -ENOMEM;

	hdr = reformatbf;
	if (attrs->encap) {
		udphdr = (struct udphdr *)reformatbf;
		udphdr->uh_sport = attrs->sport;
		udphdr->uh_dport = attrs->dport;
		hdr += sizeof(*udphdr);
	}

	/* convert to network format */
	spi = htonl(attrs->spi);
	memcpy(hdr, &spi, 4);

	reformat_params->param_0 = attrs->authsize;
	reformat_params->size = bfflen;
	reformat_params->data = reformatbf;

	return 0;
}

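/*
 * Attach the packet-reformat action for an SA: delete the ESP (or UDP+ESP,
 * for NAT-T) transport header on decrypt, or insert it on encrypt.
 */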
static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;
	int ret;

	if (attrs->dir == IPSEC_DIR_INBOUND) {
		if (attrs->encap)
			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
		else
			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		goto cmd;
	}

	ret = setup_pkt_transport_reformat(attrs, &reformat_params);
	if (ret)
		return ret;
cmd:
	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	if (reformat_params.data)
		kfree(reformat_params.data);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}

static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
}

static void
setup_fte_ip_version(struct mlx5_flow_spec *spec, u8 family)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
		 family == AF_INET ? 4 : 6);
}

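/*
 * Install the RX SA rule: match the SA addresses, SPI and (unless
 * UDP-encapsulated) the ESP protocol, then decrypt, count, and forward to
 * the status table (or drop, for an SA in drop state).  When the SA carries
 * no VLAN, a second rule is added for VLAN id 0.
 */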
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5_fc *counter;
	int err;

	rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (!attrs->drop) {
		err = setup_modify_header(mdev, sa_entry->kspi | BIT(31), IPSEC_DIR_INBOUND,
					  &flow_act);
		if (err)
			goto err_mod_header;
	}

	err = setup_pkt_reformat(mdev, attrs, &flow_act);
	if (err)
		goto err_pkt_reformat;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.op = MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
		MLX5_FLOW_CONTEXT_ACTION_COUNT;

	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	if (!attrs->encap)
		setup_fte_esp(spec);

	setup_fte_spi(spec, attrs->spi, attrs->encap);
	setup_fte_no_frags(spec);

	if (sa_entry->vid != VLAN_NONE)
		setup_fte_vid(spec, sa_entry->vid);
	else
		setup_fte_no_vid(spec);

	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	ipsec_rule->rule = rule;

	/* Add another rule for zero vid */
	if (sa_entry->vid == VLAN_NONE) {
		clear_fte_vid(spec);
		setup_fte_vid(spec, 0);
		rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev,
			    "fail to add RX ipsec zero vid rule err=%d\n",
			    err);
			goto err_add_flow;
		}
		ipsec_rule->vid_zero_rule = rule;
	}

	kvfree(spec);
	ipsec_rule->fc = counter;
	ipsec_rule->modify_hdr = flow_act.modify_hdr;
	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	mlx5_fc_destroy(mdev, counter);
	if (ipsec_rule->rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->rule);
err_add_cnt:
	mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	if (flow_act.modify_hdr != NULL)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);

	return err;
}

static struct mlx5_flow_table *ipsec_tx_ft_create(struct mlx5_flow_namespace *ns,
						  int level, int prio,
						  int max_num_groups)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

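/* Count and allow everything that reaches the TX status table. */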
static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	int err;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT |
		MLX5_FLOW_CONTEXT_ACTION_ALLOW;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
	if (IS_ERR_OR_NULL(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	tx->status.rule = fte;
	return 0;

err_rule:
	return err;
}

static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx)
{
	if (!tx->roce.ft)
		return;

	mlx5_del_flow_rules(&tx->roce.rule);
	mlx5_destroy_flow_group(tx->roce.g);
	mlx5_destroy_flow_table(tx->roce.ft);
	tx->roce.ft = NULL;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec_tx *tx)
{
	tx_destroy_roce(tx);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(&tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	mlx5_del_flow_rules(&tx->kspi_miss.rule);
	mlx5_destroy_flow_group(tx->kspi_miss.group);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
	mlx5_destroy_flow_table(tx->ft.sa_kspi);
	mlx5_del_flow_rules(&tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static int ipsec_tx_roce_rule_setup(struct mlx5_core_dev *mdev,
				    struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	int err = 0;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = tx->ft.pol;
	rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
			      err);
		goto out;
	}
	tx->roce.rule = rule;

out:
	return err;
}

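/*
 * Create the single-entry RoCE TX table and the rule that steers its
 * traffic into the IPsec policy table.
 */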
static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u32 *in;

	if (!tx->roce.ns)
		return -EOPNOTSUPP;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n",
			      err);
		goto fail_table;
	}
	tx->roce.ft = ft;

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n",
			      err);
		goto fail_group;
	}
	tx->roce.g = g;

	err = ipsec_tx_roce_rule_setup(mdev, tx);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto fail_rule;
	}

	kvfree(in);
	return 0;

fail_rule:
	mlx5_destroy_flow_group(tx->roce.g);
fail_group:
	mlx5_destroy_flow_table(tx->roce.ft);
	tx->roce.ft = NULL;
fail_table:
	kvfree(in);
	return err;
}

/*
 * Set a rule in the KSPI table for values that should bypass IPsec.
 *
 * mdev - mlx5 core device
 * tx - IPsec TX
 * return - 0 for success, errno for failure
 */
static int tx_create_kspi_bypass_rules(struct mlx5_core_dev *mdev,
				       struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_act flow_act_kspi = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	dest.ft = tx->ft.status;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act_kspi.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	setup_fte_reg_a_with_tag(spec, IPSEC_ACCEL_DRV_SPI_BYPASS);
	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi,
				   &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n",
			      err);
		goto err_add_kspi_rule;
	}
	tx->kspi_bypass_rule.kspi_rule = rule;

	/* Set the rule for packets without an IPsec tag. */
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	memset(spec, 0, sizeof(*spec));
	setup_fte_reg_a_no_tag(spec);
	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n", err);
		goto err_add_rule;
	}
	tx->kspi_bypass_rule.rule = rule;

	kvfree(spec);
	return 0;
err_add_rule:
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_add_kspi_rule:
	kvfree(spec);
	return err;
}

static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	int err;

	/*
	 * The TX flow is different for Ethernet traffic than for RoCE packets.
	 * For Ethernet packets we start in the SA KSPI table, which matches
	 * the KSPI of the SA rule against the KSPI in the packet metadata.
	 * For RoCE traffic we start in the policy table, then move to the SA
	 * table, which matches either the reqid of the SA rule against the
	 * reqid reported by the policy table, or the IP header fields of the
	 * SA against the packet IP header fields.
	 * Tables are ordered by their level, so the KSPI table gets level 0
	 * to make it the first one for Ethernet traffic.
	 * For RoCE, the RoCE TX table directs the packets to the policy table
	 * explicitly.
	 */
	ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.sa_kspi = ft;

	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_reqid_ft;
	}
	tx->ft.sa = ft;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
				mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1,
				&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}
	} else {
		ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_pol_ft;
		}
		tx->ft.pol = ft;
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = tx->ft.sa;
		err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
		if (err)
			goto err_pol_miss;
	}

	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_status_ft;
	}
	tx->ft.status = ft;

	/* Set miss rule for the KSPI table, with drop action */
	err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL);
	if (err)
		goto err_kspi_miss;

	err = tx_create_kspi_bypass_rules(mdev, tx);
	if (err)
		goto err_kspi_rule;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	err = ipsec_tx_create_roce(mdev, tx);
	if (err)
		goto err_counter_rule;

	return 0;

err_counter_rule:
	mlx5_del_flow_rules(&tx->status.rule);
err_status_rule:
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_kspi_rule:
	mlx5_destroy_flow_table(tx->ft.status);
err_status_ft:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(&tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
	}
err_pol_miss:
	if (!tx->chains)
		mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(&tx->kspi_miss.rule);
	mlx5_destroy_flow_group(tx->kspi_miss.group);
err_kspi_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_reqid_ft:
	mlx5_destroy_flow_table(tx->ft.sa_kspi);
	return err;
}

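/*
 * tx_get()/tx_put() reference-count the TX steering tree; the first user
 * creates it and the last one tears it down.  Callers must hold ft.mutex.
 */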
static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = tx_create(mdev, tx);
	if (err)
		return err;

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	tx_destroy(tx);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
				     u16 kspi)
{
	/* Add IPsec indicator in metadata_reg_a. */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_a);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC << 23 | kspi);
}

static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a. */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC << 23);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a, 0);
}

static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_0, reqid);
}

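/*
 * Match the policy's upper-layer selector: TCP or UDP protocol plus any
 * source/destination ports given in @upspec.
 */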
static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
					 spec->match_criteria, udp_dport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_dport, upspec->dport);
		}

		if (upspec->sport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
					 spec->match_criteria, udp_sport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
					 spec->match_criteria, tcp_dport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_dport, upspec->dport);
		}

		if (upspec->sport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
					 spec->match_criteria, tcp_sport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}

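/*
 * Add the Ethernet TX SA rule: match the SA's KSPI tag in metadata_reg_a
 * in the SA KSPI table and apply the SA actions.
 */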
static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
			    struct mlx5e_ipsec_tx *tx,
			    struct mlx5_flow_act *flow_act,
			    struct mlx5_flow_destination *dest,
			    int num_dest)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	setup_fte_no_frags(spec);
	setup_fte_reg_a_with_tag(spec, sa_entry->kspi);

	if (sa_entry->vid != VLAN_NONE)
		setup_fte_vid(spec, sa_entry->vid);
	else
		setup_fte_no_vid(spec);

	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err);
		goto err_add_kspi_flow;
	}
	ipsec_rule->kspi_rule = rule;
	kvfree(spec);
	return 0;

err_add_kspi_flow:
	kvfree(spec);
	return err;
}

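/*
 * Add the RoCE TX SA rules in the SA table: one matching the reqid set by
 * the policy table (when the SA has a reqid) and one matching the SA's IP
 * addresses.
 */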
static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
				 struct mlx5e_ipsec_tx *tx,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_flow_destination *dest,
				 int num_dest)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (attrs->reqid) {
		if (sa_entry->vid != VLAN_NONE)
			setup_fte_vid(spec, sa_entry->vid);
		else
			setup_fte_no_vid(spec);
		setup_fte_no_frags(spec);
		setup_fte_reg_c0(spec, attrs->reqid);
		rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err);
			goto err_add_reqid_rule;
		}
		ipsec_rule->reqid_rule = rule;
		memset(spec, 0, sizeof(*spec));
	}

	if (sa_entry->vid != VLAN_NONE)
		setup_fte_vid(spec, sa_entry->vid);
	else
		setup_fte_no_vid(spec);

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
	setup_fte_no_frags(spec);

	rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err);
		goto err_add_ip_rule;
	}
	ipsec_rule->rule = rule;
	kvfree(spec);
	return 0;

err_add_ip_rule:
	mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
err_add_reqid_rule:
	kvfree(spec);
	return err;
}

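/*
 * Install all TX rules for an SA: encrypt, count, and forward to the
 * status table (or drop, for an SA in drop state), reachable both through
 * the KSPI path and the reqid/IP path.
 */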
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5e_ipsec_tx *tx;
	struct mlx5_fc *counter;
	int err;

	tx = tx_ft_get(mdev, ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	err = setup_pkt_reformat(mdev, attrs, &flow_act);
	if (err)
		goto err_pkt_reformat;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;

	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].ft = tx->ft.status;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);

	err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
	if (err)
		goto err_add_kspi_rule;

	err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);
	if (err)
		goto err_add_reqid_ip_rule;

	ipsec_rule->fc = counter;
	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_reqid_ip_rule:
	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
err_add_kspi_rule:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	tx_ft_put(ipsec);
	return err;
}

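/*
 * Install a TX policy rule: tag matching packets with the policy reqid in
 * metadata REG_C_0 and forward them to the SA table, or count and drop
 * them for a discard policy.
 */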
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err, dstn = 0;

	ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case IPSEC_POLICY_IPSEC:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		err = setup_modify_header(mdev, attrs->reqid,
					  IPSEC_DIR_OUTBOUND, &flow_act);
		if (err)
			goto err_mod_header;
		break;
	case IPSEC_POLICY_DISCARD:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		err = -EINVAL;
		goto err_mod_header;
	}

	if (attrs->vid != VLAN_NONE)
		setup_fte_vid(spec, attrs->vid);
	else
		setup_fte_no_vid(spec);

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
	return err;
}

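/*
 * Install an RX policy rule: forward matching packets to the SA table, or
 * count and drop them for a discard policy.  When the policy carries no
 * VLAN, a second rule is added for VLAN id 0.
 */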
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
	ft = rx->chains ? ipsec_chains_get_table(rx->chains, attrs->prio) : rx->ft.pol;
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	switch (attrs->action) {
	case IPSEC_POLICY_IPSEC:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case IPSEC_POLICY_DISCARD:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);
	if (attrs->vid != VLAN_NONE)
		setup_fte_vid(spec, attrs->vid);
	else
		setup_fte_no_vid(spec);

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
		    "Failed to add RX IPsec policy rule err=%d\n", err);
		goto err_action;
	}
	pol_entry->ipsec_rule.rule = rule;

	/* Also add a rule for zero vid */
	if (attrs->vid == VLAN_NONE) {
		clear_fte_vid(spec);
		setup_fte_vid(spec, 0);
		rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev,
			    "Failed to add RX IPsec policy rule err=%d\n",
			    err);
			goto err_action;
		}
		pol_entry->ipsec_rule.vid_zero_rule = rule;
	}

	kvfree(spec);
	return 0;

err_action:
	if (pol_entry->ipsec_rule.rule != NULL)
		mlx5_del_flow_rules(&pol_entry->ipsec_rule.rule);
	kvfree(spec);
err_alloc:
	if (rx->chains != NULL)
		ipsec_chains_put_table(rx->chains, attrs->prio);
	return err;
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
	kfree(rx_ipv4->fc);
	mlx5_fc_destroy(mdev, tx->fc->drop);
	mlx5_fc_destroy(mdev, tx->fc->cnt);
	kfree(tx->fc);
}

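/*
 * Allocate the packet/drop flow counters for TX and RX; the IPv4 and IPv6
 * RX sides share one pair.
 */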
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	tx->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_fc_alloc;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_fc_cnt;
	}

	fc->drop = counter;

	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc) {
		err = -ENOMEM;
		goto err_tx_fc_drop;
	}

	/* Both IPv4 and IPv6 point to the same flow counters struct. */
	rx_ipv4->fc = fc;
	rx_ipv6->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_fc_alloc;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_fc_cnt;
	}

	fc->drop = counter;
	return 0;

err_rx_fc_cnt:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_fc_alloc:
	kfree(rx_ipv4->fc);
err_tx_fc_drop:
	mlx5_fc_destroy(mdev, tx->fc->drop);
err_tx_fc_cnt:
	mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_fc_alloc:
	kfree(tx->fc);
	return err;
}

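/*
 * Terminal rule of the RX status table: copy the 7-bit ipsec_syndrome into
 * metadata regB[24:30], then count the packet and forward it to @dest.
 */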
ipsec_status_rule(struct mlx5_core_dev * mdev,struct mlx5e_ipsec_rx * rx,struct mlx5_flow_destination * dest)1545 static int ipsec_status_rule(struct mlx5_core_dev *mdev,
1546 			     struct mlx5e_ipsec_rx *rx,
1547 			     struct mlx5_flow_destination *dest)
1548 {
1549 	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1550 	struct mlx5_flow_act flow_act = {};
1551 	struct mlx5_modify_hdr *modify_hdr;
1552 	struct mlx5_flow_handle *rule;
1553 	struct mlx5_flow_spec *spec;
1554 	int err;
1555 
1556 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1557 	if (!spec)
1558 		return -ENOMEM;
1559 
1560 	/* Action to copy the 7-bit ipsec_syndrome into regB[24:30] */
1561 	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
1562 	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
1563 	MLX5_SET(copy_action_in, action, src_offset, 0);
1564 	MLX5_SET(copy_action_in, action, length, 7);
1565 	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1566 	MLX5_SET(copy_action_in, action, dst_offset, 24);
1567 
1568 	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1569 					      1, action);
1570 
1571 	if (IS_ERR(modify_hdr)) {
1572 		err = PTR_ERR(modify_hdr);
1573 		mlx5_core_err(mdev,
1574 			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
1575 		goto out_spec;
1576 	}
1577 
1578 	/* create fte */
1579 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1580 		MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1581 		MLX5_FLOW_CONTEXT_ACTION_COUNT;
1582 	flow_act.modify_hdr = modify_hdr;
1583 
1584 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
1585 	if (IS_ERR(rule)) {
1586 		err = PTR_ERR(rule);
1587 		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
1588 		goto out;
1589 	}
1590 
1591 	kvfree(spec);
1592 	rx->status.rule = rule;
1593 	rx->status.modify_hdr = modify_hdr;
1594 	return 0;
1595 
1596 out:
1597 	mlx5_modify_header_dealloc(mdev, modify_hdr);
1598 out_spec:
1599 	kvfree(spec);
1600 	return err;
1601 }
1602 
1603 static void ipsec_fs_rx_roce_rules_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
1604 {
1605 	if (!rx_roce->ns_rdma)
1606 		return;
1607 
1608 	mlx5_del_flow_rules(&rx_roce->roce_miss.rule);
1609 	mlx5_del_flow_rules(&rx_roce->rule);
1610 	mlx5_destroy_flow_group(rx_roce->roce_miss.group);
1611 	mlx5_destroy_flow_group(rx_roce->g);
1612 }
1613 
1614 static void ipsec_fs_rx_catchall_rules_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
1615 {
1616 	mutex_lock(&rx->ft.mutex);
1617 	mlx5_del_flow_rules(&rx->sa.rule);
1618 	mlx5_destroy_flow_group(rx->sa.group);
1619 	if (rx->chains == NULL) {
1620 		mlx5_del_flow_rules(&rx->pol.rule);
1621 		mlx5_destroy_flow_group(rx->pol.group);
1622 	}
1623 	mlx5_del_flow_rules(&rx->status.rule);
1624 	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
1625 	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
1626 	mutex_unlock(&rx->ft.mutex);
1627 }
1628 
1629 static void ipsec_fs_rx_roce_table_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
1630 {
1631 	if (!rx_roce->ns_rdma)
1632 		return;
1633 
1634 	mlx5_destroy_flow_table(rx_roce->ft_rdma);
1635 	mlx5_destroy_flow_table(rx_roce->ft);
1636 }
1637 
1638 static void
1639 ipsec_fs_rx_ip_type_catchall_rule_destroy(struct mlx5e_ipsec_rx_ip_type *rx_ip_type)
1640 {
1641 	mlx5_del_flow_rules(&rx_ip_type->ipv4_rule);
1642 	mlx5_del_flow_rules(&rx_ip_type->ipv6_rule);
1643 	mlx5_del_flow_rules(&rx_ip_type->miss.rule);
1644 	mlx5_destroy_flow_group(rx_ip_type->miss.group);
1645 	rx_ip_type->miss.group = NULL;
1646 }
1647 
1648 static void ipsec_fs_rx_table_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
1649 {
1650 	if (rx->chains) {
1651 		ipsec_chains_destroy(rx->chains);
1652 	} else {
1653 		mlx5_del_flow_rules(&rx->pol.rule);
1654 		mlx5_destroy_flow_table(rx->ft.pol);
1655 	}
1656 	mlx5_destroy_flow_table(rx->ft.sa);
1657 	mlx5_destroy_flow_table(rx->ft.status);
1658 	ipsec_fs_rx_roce_table_destroy(&rx->roce);
1659 }
1660 
1661 static void ipsec_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
1662 {
1663 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1664 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
1665 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
1666 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
1667 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
1668 }
1669 
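/*
 * Steer RoCEv2 traffic, identified by UDP destination port 4791
 * (ROCE_V2_UDP_DPORT), from the NIC RX table into the RDMA RX table.
 * Everything else hits the NULL-spec miss rule and continues to the
 * caller-provided default destination.
 */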
1670 static int ipsec_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst,
1671 				    struct mlx5e_ipsec_rx_roce *roce, struct mlx5_core_dev *mdev)
1672 {
1673 	struct mlx5_flow_destination dst = {};
1674 	struct mlx5_flow_act flow_act = {};
1675 	struct mlx5_flow_handle *rule;
1676 	struct mlx5_flow_spec *spec;
1677 	int err = 0;
1678 
1679 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1680 	if (!spec)
1681 		return -ENOMEM;
1682 
1683 	ipsec_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
1684 
1686 	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
1687 	dst.ft = roce->ft_rdma;
1688 
1689 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1690 	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
1691 	if (IS_ERR(rule)) {
1692 		err = PTR_ERR(rule);
1693 		mlx5_core_err(mdev, "Fail to add RX roce ipsec rule err=%d\n",
1694 			      err);
1695 		goto fail_add_rule;
1696 	}
1697 
1698 	roce->rule = rule;
1699 
1700 	rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, default_dst, 1);
1701 	if (IS_ERR(rule)) {
1702 		err = PTR_ERR(rule);
1703 		mlx5_core_err(mdev, "Fail to add RX roce ipsec miss rule err=%d\n",
1704 			      err);
1705 		goto fail_add_default_rule;
1706 	}
1707 
1708 	roce->roce_miss.rule = rule;
1709 
1710 	kvfree(spec);
1711 	return 0;
1712 
1713 fail_add_default_rule:
1714 	mlx5_del_flow_rules(&roce->rule);
1715 fail_add_rule:
1716 	kvfree(spec);
1717 	return err;
1718 }
1719 
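/*
 * Carve the two-entry RoCE RX table into flow groups: group 0 holds
 * the single FTE matching on ip_protocol/udp_dport, group 1 is the
 * match-all miss group.  The rules themselves are installed by
 * ipsec_roce_rx_rule_setup().
 */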
1720 static int ipsec_roce_rx_rules(struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *defdst,
1721 			       struct mlx5_core_dev *mdev)
1722 {
1723 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1724 	struct mlx5_flow_group *g;
1725 	void *outer_headers_c;
1726 	u32 *in;
1727 	int err = 0;
1728 	int ix = 0;
1729 	u8 *mc;
1730 
1731 	if (!rx->roce.ns_rdma)
1732 		return 0;
1733 
1734 	in = kvzalloc(inlen, GFP_KERNEL);
1735 	if (!in)
1736 		return -ENOMEM;
1737 
1738 	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1739 	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
1740 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
1741 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
1742 
1743 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1744 	MLX5_SET_CFG(in, start_flow_index, ix);
1745 	ix += 1;
1746 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1747 	g = mlx5_create_flow_group(rx->roce.ft, in);
1748 	if (IS_ERR(g)) {
1749 		err = PTR_ERR(g);
1750 		mlx5_core_err(mdev, "Fail to create ipsec rx roce group at nic err=%d\n", err);
1751 		goto fail_group;
1752 	}
1753 	rx->roce.g = g;
1754 
1755 	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
1756 	MLX5_SET_CFG(in, start_flow_index, ix);
1757 	ix += 1;
1758 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1759 	g = mlx5_create_flow_group(rx->roce.ft, in);
1760 	if (IS_ERR(g)) {
1761 		err = PTR_ERR(g);
1762 		mlx5_core_err(mdev, "Fail to create ipsec rx roce miss group at nic err=%d\n",
1763 			      err);
1764 		goto fail_mgroup;
1765 	}
1766 	rx->roce.roce_miss.group = g;
1767 
1768 	err = ipsec_roce_rx_rule_setup(defdst, &rx->roce, mdev);
1769 	if (err)
1770 		goto fail_setup_rule;
1771 
1772 	kvfree(in);
1773 	return 0;
1774 
1775 fail_setup_rule:
1776 	mlx5_destroy_flow_group(rx->roce.roce_miss.group);
1777 fail_mgroup:
1778 	mlx5_destroy_flow_group(rx->roce.g);
1779 fail_group:
1780 	kvfree(in);
1781 	return err;
1782 }
1783 
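/*
 * Install the catch-all RX rules under ft.mutex: RoCE steering first,
 * then the status-table rule, a policy-table miss rule when prio
 * chains are not in use (the chains provide their own miss path), and
 * finally the SA-table miss rule that punts unmatched packets to the
 * stack.
 */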
1784 static int ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv,
1785 				      struct mlx5e_ipsec_rx *rx,
1786 				      struct mlx5_flow_destination *defdst)
1787 {
1788 	struct mlx5_core_dev *mdev = priv->mdev;
1789 	struct mlx5_flow_destination dest[2] = {};
1790 	int err = 0;
1791 
1792 	mutex_lock(&rx->ft.mutex);
1793 	/* IPsec RoCE RX rules */
1794 	err = ipsec_roce_rx_rules(rx, defdst, mdev);
1795 	if (err)
1796 		goto out;
1797 
1798 	/* IPsec Rx IP Status table rule */
1799 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1800 	if (rx->roce.ft)
1801 		dest[0].ft = rx->roce.ft;
1802 	else
1803 		dest[0].ft = priv->fts.vlan.t;
1804 
1805 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1806 	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
1807 	err = ipsec_status_rule(mdev, rx, dest);
1808 	if (err)
1809 		goto err_roce_rules_destroy;
1810 
1811 	if (!rx->chains) {
1812 		/* IPsec Rx IP policy default miss rule */
1813 		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, defdst);
1814 		if (err)
1815 			goto err_status_rule_destroy;
1816 	}
1817 
1818 	/* FIXME: This is a workaround for the current design, which installs
1819 	 * the SA on the first packet: that packet must be forwarded to the
1820 	 * stack.  It does not work with RoCE and eswitch traffic.
1821 	 */
1822 	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, defdst);
1823 	if (err)
1824 		goto err_status_sa_rule_destroy;
1825 
1826 	mutex_unlock(&rx->ft.mutex);
1827 	return 0;
1828 
1829 err_status_sa_rule_destroy:
1830 	if (!rx->chains) {
1831 		mlx5_del_flow_rules(&rx->pol.rule);
1832 		mlx5_destroy_flow_group(rx->pol.group);
1833 	}
1834 err_status_rule_destroy:
1835 	mlx5_del_flow_rules(&rx->status.rule);
1836 	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
1837 err_roce_rules_destroy:
1838 	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
1839 out:
1840 	mutex_unlock(&rx->ft.mutex);
1841 	return err;
1842 }
1843 
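/*
 * Create the RoCE tables when the RDMA namespace is present: a
 * two-entry table in the NIC RX namespace (match + miss) and the RDMA
 * RX table that matched traffic is forwarded to.
 */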
1844 static int ipsec_fs_rx_roce_tables_create(struct mlx5e_ipsec_rx *rx,
1845 					  int rx_init_level, int rdma_init_level)
1846 {
1847 	struct mlx5_flow_table_attr ft_attr = {};
1848 	struct mlx5_flow_table *ft;
1849 	int err = 0;
1850 
1851 	if (!rx->roce.ns_rdma)
1852 		return 0;
1853 
1854 	ft_attr.max_fte = 2;
1855 	ft_attr.level = rx_init_level;
1856 	ft = mlx5_create_flow_table(rx->ns, &ft_attr);
1857 	if (IS_ERR(ft)) {
1858 		err = PTR_ERR(ft);
1859 		return err;
1860 	}
1861 	rx->roce.ft = ft;
1862 
1863 	ft_attr.max_fte = 0;
1864 	ft_attr.level = rdma_init_level;
1865 	ft = mlx5_create_flow_table(rx->roce.ns_rdma, &ft_attr);
1866 	if (IS_ERR(ft)) {
1867 		err = PTR_ERR(ft);
1868 		goto out;
1869 	}
1870 	rx->roce.ft_rdma = ft;
1871 
1872 	return 0;
1873 out:
1874 	mlx5_destroy_flow_table(rx->roce.ft);
1875 	rx->roce.ft = NULL;
1876 	return err;
1877 }
1878 
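/*
 * Populate the IP-type dispatch table: IPv4 packets go to the IPv4
 * policy table, IPv6 packets to the IPv6 policy table, and anything
 * else falls through to the miss rule pointing at the default
 * destination.
 */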
1879 static int
1880 ipsec_fs_rx_ip_type_catchall_rules_create(struct mlx5e_priv *priv,
1881                                           struct mlx5_flow_destination *defdst)
1882 {
1883 	struct mlx5_core_dev *mdev = priv->mdev;
1884 	struct mlx5e_ipsec *ipsec = priv->ipsec;
1885 	struct mlx5_flow_destination dst = {};
1886 	struct mlx5_flow_act flow_act = {};
1887 	struct mlx5_flow_handle *rule;
1888 	struct mlx5_flow_spec *spec;
1889 	int err = 0;
1890 
1891 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1892 	if (!spec)
1893 		return -ENOMEM;
1894 
1895 	dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1896 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1897 
1898 	/* Set rule for ipv4 packets */
1899 	dst.ft = ipsec->rx_ipv4->ft.pol;
1900 	setup_fte_ip_version(spec, AF_INET);
1901 	rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
1902 	if (IS_ERR(rule)) {
1903 		err = PTR_ERR(rule);
1904 		mlx5_core_err(mdev, "Failed to add ipv4 rule to ip_type table err=%d\n",
1905 			      err);
1906 		goto out;
1907 	}
1908 	ipsec->rx_ip_type->ipv4_rule = rule;
1909 
1910 	/* Set rule for ipv6 packets */
1911 	dst.ft = ipsec->rx_ipv6->ft.pol;
1912 	setup_fte_ip_version(spec, AF_INET6);
1913 	rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
1914 	if (IS_ERR(rule)) {
1915 		err = PTR_ERR(rule);
1916 		mlx5_core_err(mdev, "Failed to add ipv6 rule to ip_type table err=%d\n",
1917 			      err);
1918 		goto fail_add_ipv6_rule;
1919 	}
1920 	ipsec->rx_ip_type->ipv6_rule = rule;
1921 
1922 	/* Set miss rule */
1923 	err = ipsec_miss_create(mdev, ipsec->rx_ip_type->ft, &ipsec->rx_ip_type->miss, defdst);
1924 	if (err) {
1925 		mlx5_core_err(mdev, "Failed to add miss rule to ip_type table err=%d\n",
1926 			      err);
1927 		goto fail_miss_rule;
1928 	}
1929 
1930 	goto out;
1931 
1932 fail_miss_rule:
1933 	mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv6_rule);
1934 fail_add_ipv6_rule:
1935 	mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv4_rule);
1936 out:
1937 	kvfree(spec);
1938 	return err;
1939 }
1940 
1941 static int
1942 ipsec_fs_rx_ip_type_table_create(struct mlx5e_priv *priv,
1943                                  int level)
1944 {
1945 	struct mlx5e_ipsec *ipsec = priv->ipsec;
1946 	struct mlx5_flow_table *ft;
1947 	int err = 0;
1948 
1949 	/* Create rx ip type table */
1950 	ft = ipsec_rx_ft_create(ipsec->rx_ip_type->ns, level, 0, 1);
1951 	if (IS_ERR(ft)) {
1952 		err = PTR_ERR(ft);
1953 		goto out;
1954 	}
1955 	ipsec->rx_ip_type->ft = ft;
1956 
1957 	priv->fts.ipsec_ft = priv->ipsec->rx_ip_type->ft;
1958 
1959 out:
1960 	return err;
1961 }
1962 
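/*
 * Build one RX table set per address family.  Relative to
 * rx_init_level the layout is: policy table (or prio chains) at the
 * base level, SA table at +1, status table at +2, and the RoCE tables
 * starting at +3.
 */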
1963 static int ipsec_fs_rx_table_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx,
1964 				    int rx_init_level, int rdma_init_level)
1965 {
1966 	struct mlx5_flow_namespace *ns = rx->ns;
1967 	struct mlx5_flow_table *ft;
1968 	int err = 0;
1969 
1970 	mutex_lock(&rx->ft.mutex);
1971 
1972 	/* IPsec Rx IP SA table create */
1973 	ft = ipsec_rx_ft_create(ns, rx_init_level + 1, 0, 1);
1974 	if (IS_ERR(ft)) {
1975 		err = PTR_ERR(ft);
1976 		goto out;
1977 	}
1978 	rx->ft.sa = ft;
1979 
1980 	/* IPsec Rx IP Status table create */
1981 	ft = ipsec_rx_ft_create(ns, rx_init_level + 2, 0, 1);
1982 	if (IS_ERR(ft)) {
1983 		err = PTR_ERR(ft);
1984 		goto err_sa_table_destroy;
1985 	}
1986 	rx->ft.status = ft;
1987 
1988 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
1989 		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
1990 				MLX5_FLOW_NAMESPACE_KERNEL, 0,
1991 				rx_init_level, &rx->ft.pol);
1992 		if (IS_ERR(rx->chains)) {
1993 			err = PTR_ERR(rx->chains);
1994 			goto err_status_table_destroy;
1995 		}
1996 	} else {
1997 		ft = ipsec_rx_ft_create(ns, rx_init_level, 0, 1);
1998 		if (IS_ERR(ft)) {
1999 			err = PTR_ERR(ft);
2000 			goto err_status_table_destroy;
2001 		}
2002 		rx->ft.pol = ft;
2003 	}
2004 
2005 	/* IPsec RoCE RX tables create */
2006 	err = ipsec_fs_rx_roce_tables_create(rx, rx_init_level + 3,
2007 					     rdma_init_level);
2008 	if (err)
2009 		goto err_pol_table_destroy;
2010 
2011 	goto out;
2012 
2013 err_pol_table_destroy:
2014 	mlx5_destroy_flow_table(rx->ft.pol);
2015 err_status_table_destroy:
2016 	mlx5_destroy_flow_table(rx->ft.status);
2017 err_sa_table_destroy:
2018 	mlx5_destroy_flow_table(rx->ft.sa);
2019 out:
2020 	mutex_unlock(&rx->ft.mutex);
2021 	return err;
2022 }
2023 
2024 #define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
2025 
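/*
 * RoCE IPsec offload requires firmware support for redirecting NIC RX
 * traffic to RDMA RX and RDMA TX traffic back to NIC TX.  If either
 * capability or either RDMA namespace is missing, the RoCE namespace
 * pointers stay NULL and the rest of the code skips RoCE steering.
 */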
2026 static void mlx5e_accel_ipsec_fs_init_roce(struct mlx5e_ipsec *ipsec)
2027 {
2028 	struct mlx5_core_dev *mdev = ipsec->mdev;
2029 	struct mlx5_flow_namespace *ns;
2030 
2031 	if ((MLX5_CAP_GEN_2(ipsec->mdev, flow_table_type_2_type) &
2032 	      NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
2033 		mlx5_core_dbg(mdev, "Failed to init roce ns, capabilities not supported\n");
2034 		return;
2035 	}
2036 
2037 	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
2038 	if (!ns) {
2039 		mlx5_core_err(mdev, "Failed to init roce rx ns\n");
2040 		return;
2041 	}
2042 
2043 	ipsec->rx_ipv4->roce.ns_rdma = ns;
2044 	ipsec->rx_ipv6->roce.ns_rdma = ns;
2045 
2046 	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
2047 	if (!ns) {
2048 		ipsec->rx_ipv4->roce.ns_rdma = NULL;
2049 		ipsec->rx_ipv6->roce.ns_rdma = NULL;
2050 		mlx5_core_err(mdev, "Failed to init roce tx ns\n");
2051 		return;
2052 	}
2053 
2054 	ipsec->tx->roce.ns = ns;
2055 }
2056 
2057 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2058 {
2059 	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
2060 		return tx_add_rule(sa_entry);
2061 
2062 	return rx_add_rule(sa_entry);
2063 }
2064 
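/*
 * Release every flow handle an SA entry may own; the optional VID-zero
 * and reqid rules are NULL-checked.  Outbound entries also drop their
 * TX flow table reference, inbound entries free the RX modify header.
 */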
2065 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2066 {
2067 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
2068 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2069 
2070 	mlx5_del_flow_rules(&ipsec_rule->rule);
2071 	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
2072 	if (ipsec_rule->vid_zero_rule != NULL)
2073 		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
2074 	if (ipsec_rule->reqid_rule != NULL)
2075 		mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
2076 	mlx5_fc_destroy(mdev, ipsec_rule->fc);
2077 	mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
2078 	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND) {
2079 		tx_ft_put(sa_entry->ipsec);
2080 		return;
2081 	}
2082 
2083 	if (ipsec_rule->modify_hdr != NULL)
2084 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2085 }
2086 
2087 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2088 {
2089 	if (pol_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
2090 		return tx_add_policy(pol_entry);
2091 
2092 	return rx_add_policy(pol_entry);
2093 }
2094 
2095 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2096 {
2097 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
2098 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2099 
2100 	mlx5_del_flow_rules(&ipsec_rule->rule);
2101 	if (ipsec_rule->vid_zero_rule != NULL)
2102 		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
2103 
2104 	if (pol_entry->attrs.dir == IPSEC_DIR_INBOUND) {
2105 		struct mlx5e_ipsec_rx *rx;
2106 
2107 		rx = (pol_entry->attrs.family == AF_INET)
2108 		    ? pol_entry->ipsec->rx_ipv4
2109 		    : pol_entry->ipsec->rx_ipv6;
2110 		if (rx->chains)
2111 			ipsec_chains_put_table(rx->chains,
2112 			    pol_entry->attrs.prio);
2113 		return;
2114 	}
2115 
2116 	if (ipsec_rule->modify_hdr)
2117 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2118 
2119 	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
2120 }
2121 
2122 void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv)
2123 {
2124 	/* Check if IPsec supported */
2125 	if (!priv->ipsec)
2126 		return;
2127 
2128 	ipsec_fs_rx_ip_type_catchall_rule_destroy(priv->ipsec->rx_ip_type);
2129 	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
2130 	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
2131 }
2132 
2133 int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv)
2134 {
2135 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2136 	struct mlx5_flow_destination dest = {};
2137 	int err = 0;
2138 
2139 	/* Check if IPsec supported */
2140 	if (!ipsec)
2141 		return 0;
2142 
2143 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2144 	dest.ft = priv->fts.vlan.t;
2145 	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv6, &dest);
2146 	if (err)
2147 		goto out;
2148 
2149 	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv4, &dest);
2150 	if (err) {
2151 		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
		goto out;
	}
2152 
2153 	err = ipsec_fs_rx_ip_type_catchall_rules_create(priv, &dest);
2154 	if (err) {
2155 		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
2156 		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
2157 	}
2158 
2159 out:
2160 	return err;
2161 }
2162 
2163 void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv)
2164 {
2165 	struct mlx5_core_dev *mdev = priv->mdev;
2166 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2167 
2168 	/* Check if IPsec supported */
2169 	if (!ipsec)
2170 		return;
2171 
2172 	mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
2173 	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv6);
2174 	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv4);
2175 }
2176 
2177 int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv)
2178 {
2179 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2180 	int err = 0;
2181 
2182 	/* Check if IPsec supported */
2183 	if (!ipsec)
2184 		return 0;
2185 
2186 	err = ipsec_fs_rx_ip_type_table_create(priv, 0);
2187 	if (err)
2188 		return err;
2189 
2190 	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv4, 1, 0);
2191 	if (err)
2192 		goto err_ipv4_table;
2193 
2194 	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv6, 5, 1);
2195 	if (err)
2196 		goto err_ipv6_table;
2197 
2198 	return 0;
2199 
2200 err_ipv6_table:
2201 	ipsec_fs_rx_table_destroy(priv->mdev, ipsec->rx_ipv4);
2202 err_ipv4_table:
2203 	mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
2204 	return err;
2205 }
2206 
2207 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
2208 {
2209 	WARN_ON(ipsec->tx->ft.refcnt);
2210 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
2211 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
2212 	mutex_destroy(&ipsec->tx->ft.mutex);
2213 	ipsec_fs_destroy_counters(ipsec);
2214 	kfree(ipsec->rx_ip_type);
2215 	kfree(ipsec->rx_ipv6);
2216 	kfree(ipsec->rx_ipv4);
2217 	kfree(ipsec->tx);
2218 }
2219 
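/*
 * Top-level flow-steering init: resolve the egress and kernel flow
 * namespaces, allocate the per-direction state, set up counters and
 * mutexes, and probe for optional RoCE support last.
 */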
2220 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
2221 {
2222 	struct mlx5_flow_namespace *tns, *rns;
2223 	int err = -ENOMEM;
2224 
2225 	tns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
2226 	if (!tns)
2227 		return -EOPNOTSUPP;
2228 
2229 	rns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2230 	if (!rns)
2231 		return -EOPNOTSUPP;
2232 
2233 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
2234 	if (!ipsec->tx)
2235 		return -ENOMEM;
2236 
2237 	ipsec->rx_ip_type = kzalloc(sizeof(*ipsec->rx_ip_type), GFP_KERNEL);
2238 	if (!ipsec->rx_ip_type)
2239 		goto err_tx;
2240 
2241 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
2242 	if (!ipsec->rx_ipv4)
2243 		goto err_ip_type;
2244 
2245 	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
2246 	if (!ipsec->rx_ipv6)
2247 		goto err_rx_ipv4;
2248 
2249 	err = ipsec_fs_init_counters(ipsec);
2250 	if (err)
2251 		goto err_rx_ipv6;
2252 
2253 	ipsec->tx->ns = tns;
2254 	mutex_init(&ipsec->tx->ft.mutex);
2255 	ipsec->rx_ip_type->ns = rns;
2256 	ipsec->rx_ipv4->ns = rns;
2257 	ipsec->rx_ipv6->ns = rns;
2258 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
2259 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
2260 
2261 	mlx5e_accel_ipsec_fs_init_roce(ipsec);
2262 
2263 	return 0;
2264 
2265 err_rx_ipv6:
2266 	kfree(ipsec->rx_ipv6);
2267 err_rx_ipv4:
2268 	kfree(ipsec->rx_ipv4);
2269 err_ip_type:
2270 	kfree(ipsec->rx_ip_type);
2271 err_tx:
2272 	kfree(ipsec->tx);
2273 	return err;
2274 }
2275 
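/*
 * Modify an SA make-before-break: clone the entry into a shadow with
 * cleared rule handles, install fresh rules for the shadow, and only
 * on success delete the old rules and copy the shadow back, so a
 * valid rule set is in place at all times.
 */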
2276 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
2277 {
2278 	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
2279 	int err;
2280 
2281 	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
2282 	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
2283 
2284 	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
2285 	if (err)
2286 		return;
2287 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
2288 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
2289 }
2290