xref: /freebsd/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c (revision b762b199afc6ed56ac95ca19c7fc29c2927fa85c)
1 /*-
2  * Copyright (c) 2023 NVIDIA corporation & affiliates.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  */
26 
27 #include "opt_ipsec.h"
28 
29 #include <sys/types.h>
30 #include <netinet/in.h>
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <net/pfkeyv2.h>
35 #include <netipsec/key_var.h>
36 #include <netipsec/keydb.h>
37 #include <netipsec/ipsec.h>
38 #include <netipsec/xform.h>
39 #include <netipsec/ipsec_offload.h>
40 #include <dev/mlx5/fs.h>
41 #include <dev/mlx5/mlx5_en/en.h>
42 #include <dev/mlx5/qp.h>
43 #include <dev/mlx5/mlx5_accel/ipsec.h>
44 #include <dev/mlx5/mlx5_core/fs_core.h>
45 #include <dev/mlx5/mlx5_core/fs_chains.h>
46 
47 /*
48  * TX tables are organized differently for Ethernet and for RoCE:
49  *
50  *                       +=========+
51  *       Ethernet Tx     | SA KSPI | match
52  * --------------------->|Flowtable|----->+         +
53  *                       |         |\     |        / \
54  *                       +=========+ |    |       /   \         +=========+     +=========+
55  *                              miss |    |      /     \        |  Status |     |         |
56  *                      DROP<--------+    |---->|Encrypt|------>|Flowtable|---->|  TX NS  |
57  *                                        |      \     /        |         |     |         |
58  *                                        |       \   /         +=========+     +=========+
59  *       +=========+      +=========+     |        \ /               |
60  *  RoCE |  Policy | match|SA ReqId |match|         +                |
61  *  Tx   |Flowtable|----->|Flowtable|---->+                          |
62  *  ---->|IP header|      |ReqId+IP |                                |
63  *       |         |      | header  |--------------------------------+
64  *       +=========+      +=========+         miss                   |
65  *            |                                                      |
66  *            |                   miss                               |
67  *            +-------------------------------------------------------
68  *
69  *                                                                                  +=========+
70  *                                                                                  |   RDMA  |
71  *                                                                                  |Flowtable|
72  *                                                                                  |         |
73  * Rx Tables and rules:                                                             +=========+
74  *                                             +                                        /
75  *       +=========+      +=========+         / \         +=========+      +=========+ /match
76  *       |  Policy |      |   SA    |        /   \        |  Status |      |  RoCE   |/
77  *  ---->|Flowtable| match|Flowtable| match /     \       |Flowtable|----->|Flowtable|
78  *       |IP header|----->|IP header|----->|Decrypt|----->|         |      | Roce V2 |
79  *       |         |      |+ESP+SPI |       \     /       |         |      | UDP port|\
80  *       +=========+      +=========+        \   /        +=========+      +=========+ \miss
81  *             |               |              \ /                                       \
82  *             |               |               +                                      +=========+
83  *             |     miss      |          miss                                       | Ethernet|
84  *             +--------------->---------------------------------------------------->|  RX NS  |
85  *                                                                                   |         |
86  *                                                                                   +=========+
87  *
88  */
89 
90 #define NUM_IPSEC_FTE BIT(15)
91 #define IPSEC_TUNNEL_DEFAULT_TTL 0x40
92 
93 struct mlx5e_ipsec_fc {
94 	struct mlx5_fc *cnt;
95 	struct mlx5_fc *drop;
96 };
97 
98 struct mlx5e_ipsec_ft {
99 	struct mutex mutex; /* Protect changes to this struct */
100 	struct mlx5_flow_table *pol;
101 	struct mlx5_flow_table *sa_kspi;
102 	struct mlx5_flow_table *sa;
103 	struct mlx5_flow_table *status;
104 	u32 refcnt;
105 };
106 
107 struct mlx5e_ipsec_tx_roce {
108 	struct mlx5_flow_group *g;
109 	struct mlx5_flow_table *ft;
110 	struct mlx5_flow_handle *rule;
111 	struct mlx5_flow_namespace *ns;
112 };
113 
114 struct mlx5e_ipsec_miss {
115 	struct mlx5_flow_group *group;
116 	struct mlx5_flow_handle *rule;
117 };
118 
119 struct mlx5e_ipsec_tx {
120 	struct mlx5e_ipsec_ft ft;
121 	struct mlx5e_ipsec_miss pol;
122 	struct mlx5e_ipsec_miss kspi_miss;
123 	struct mlx5e_ipsec_rule status;
124 	struct mlx5e_ipsec_rule kspi_bypass_rule; /* rule for IPsec bypass */
125 	struct mlx5_flow_namespace *ns;
126 	struct mlx5e_ipsec_fc *fc;
127 	struct mlx5_fs_chains *chains;
128 	struct mlx5e_ipsec_tx_roce roce;
129 };
130 
131 struct mlx5e_ipsec_rx_roce {
132 	struct mlx5_flow_group *g;
133 	struct mlx5_flow_table *ft;
134 	struct mlx5_flow_handle *rule;
135 	struct mlx5e_ipsec_miss roce_miss;
136 
137 	struct mlx5_flow_table *ft_rdma;
138 	struct mlx5_flow_namespace *ns_rdma;
139 };
140 
141 struct mlx5e_ipsec_rx {
142 	struct mlx5e_ipsec_ft ft;
143 	struct mlx5e_ipsec_miss pol;
144 	struct mlx5e_ipsec_miss sa;
145 	struct mlx5e_ipsec_rule status;
146 	struct mlx5_flow_namespace *ns;
147 	struct mlx5e_ipsec_fc *fc;
148 	struct mlx5_fs_chains *chains;
149 	struct mlx5e_ipsec_rx_roce roce;
150 };
151 
152 static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
153                                      u16 kspi);
154 static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec);
155 
156 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
157 {
158 	/* Non fragmented */
159 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
160 
161 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
162 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
163 }
164 
165 static void setup_fte_esp(struct mlx5_flow_spec *spec)
166 {
167 	/* ESP header */
168 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
169 
170 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
171 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
172 }
173 
174 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
175 {
176 	/* SPI number */
177 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
178 
179 	if (encap) {
180 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.inner_esp_spi);
181 		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.inner_esp_spi, spi);
182 	} else {
183 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
184 		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
185 	}
186 }
187 
188 static void
189 setup_fte_vid(struct mlx5_flow_spec *spec, u16 vid)
190 {
191 	/* virtual lan tag */
192 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
193 
194 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
195 	    outer_headers.cvlan_tag);
196 	MLX5_SET(fte_match_param, spec->match_value,
197 	    outer_headers.cvlan_tag, 1);
198 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
199 	    outer_headers.first_vid);
200 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
201 	    vid);
202 }
203 
204 static void
205 clear_fte_vid(struct mlx5_flow_spec *spec)
206 {
207 	MLX5_SET(fte_match_param, spec->match_criteria,
208 	    outer_headers.cvlan_tag, 0);
209 	MLX5_SET(fte_match_param, spec->match_value,
210 	    outer_headers.cvlan_tag, 0);
211 	MLX5_SET(fte_match_param, spec->match_criteria,
212 	    outer_headers.first_vid, 0);
213 	MLX5_SET(fte_match_param, spec->match_value,
214 	    outer_headers.first_vid, 0);
215 }
216 
217 static void
218 setup_fte_no_vid(struct mlx5_flow_spec *spec)
219 {
220 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
221 	    outer_headers.cvlan_tag);
222 	MLX5_SET(fte_match_param, spec->match_value,
223 	    outer_headers.cvlan_tag, 0);
224 }
225 
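/*
 * Wrap a flow table in a prio-chains object so multiple policy
 * priorities can be stacked.  Chain 0/prio 1/level 0 is taken as the
 * root table that fs_core connects to; packets that miss all chains
 * fall through to miss_ft.
 */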
226 static struct mlx5_fs_chains *
227 ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
228 		    enum mlx5_flow_namespace_type ns, int base_prio,
229 		    int base_level, struct mlx5_flow_table **root_ft)
230 {
231 	struct mlx5_chains_attr attr = {};
232 	struct mlx5_fs_chains *chains;
233 	struct mlx5_flow_table *ft;
234 	int err;
235 
236 	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
237 		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
238 	attr.max_grp_num = 2;
239 	attr.default_ft = miss_ft;
240 	attr.ns = ns;
241 	attr.fs_base_prio = base_prio;
242 	attr.fs_base_level = base_level;
243 	chains = mlx5_chains_create(mdev, &attr);
244 	if (IS_ERR(chains))
245 		return chains;
246 
247 	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
248 	ft = mlx5_chains_get_table(chains, 0, 1, 0);
249 	if (IS_ERR(ft)) {
250 		err = PTR_ERR(ft);
251 		goto err_chains_get;
252 	}
253 
254 	*root_ft = ft;
255 	return chains;
256 
257 err_chains_get:
258 	mlx5_chains_destroy(chains);
259 	return ERR_PTR(err);
260 }
261 
262 static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
263 {
264 	mlx5_chains_put_table(chains, 0, 1, 0);
265 	mlx5_chains_destroy(chains);
266 }
267 
268 static struct mlx5_flow_table *
269 ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
270 {
271 	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
272 }
273 
274 static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
275 {
276 	mlx5_chains_put_table(chains, 0, prio + 1, 0);
277 }
278 
279 static struct mlx5_flow_table *ipsec_rx_ft_create(struct mlx5_flow_namespace *ns,
280 						  int level, int prio,
281 						  int max_num_groups)
282 {
283 	struct mlx5_flow_table_attr ft_attr = {};
284 
285 	ft_attr.max_fte = NUM_IPSEC_FTE;
286 	ft_attr.level = level;
287 	ft_attr.prio = prio;
288 	ft_attr.autogroup.max_num_groups = max_num_groups;
289 	ft_attr.autogroup.num_reserved_entries = 1;
290 
291 	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
292 }
293 
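/*
 * Reserve the last FTE of 'ft' as a catch-all: forward misses to 'dest'
 * when one is supplied, otherwise drop them.
 */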
294 static int ipsec_miss_create(struct mlx5_core_dev *mdev,
295 			     struct mlx5_flow_table *ft,
296 			     struct mlx5e_ipsec_miss *miss,
297 			     struct mlx5_flow_destination *dest)
298 {
299 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
300 	struct mlx5_flow_act flow_act = {};
301 	struct mlx5_flow_spec *spec;
302 	u32 *flow_group_in;
303 	int err = 0;
304 
305 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
306 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
307 	if (!flow_group_in || !spec) {
308 		err = -ENOMEM;
309 		goto out;
310 	}
311 
312 	/* Create miss_group */
313 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
314 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
315 	miss->group = mlx5_create_flow_group(ft, flow_group_in);
316 	if (IS_ERR(miss->group)) {
317 		err = PTR_ERR(miss->group);
318 		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
319 			      err);
320 		goto out;
321 	}
322 
323 	if (dest)
324 		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
325 	else
326 		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
327 	/* Create miss rule */
328 	miss->rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
329 	if (IS_ERR(miss->rule)) {
330 		mlx5_destroy_flow_group(miss->group);
331 		err = PTR_ERR(miss->rule);
332 		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
333 			      err);
334 		goto out;
335 	}
336 out:
337 	kvfree(flow_group_in);
338 	kvfree(spec);
339 	return err;
340 }
341 
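/*
 * Build a modify-header action that writes 'val' into a metadata
 * register: REG_B for inbound rules (kernel RX namespace), REG_C_0 for
 * outbound rules (egress namespace).  RX SA rules use this to tag the
 * KSPI for the stack; TX policy rules use it to report the reqid that
 * the SA table later matches via setup_fte_reg_c0().
 */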
342 static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
343                                struct mlx5_flow_act *flow_act)
344 {
345         u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
346         enum mlx5_flow_namespace_type ns_type;
347         struct mlx5_modify_hdr *modify_hdr;
348 
349         MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
350         switch (dir) {
351         case IPSEC_DIR_INBOUND:
352                 MLX5_SET(set_action_in, action, field,
353                          MLX5_ACTION_IN_FIELD_METADATA_REG_B);
354                 ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
355                 break;
356         case IPSEC_DIR_OUTBOUND:
357                 MLX5_SET(set_action_in, action, field,
358                          MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
359                 ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
360                 break;
361         default:
362                 return -EINVAL;
363         }
364 
365         MLX5_SET(set_action_in, action, data, val);
366         MLX5_SET(set_action_in, action, offset, 0);
367         MLX5_SET(set_action_in, action, length, 32);
368 
369         modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
370         if (IS_ERR(modify_hdr)) {
371                 mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
372                               PTR_ERR(modify_hdr));
373                 return PTR_ERR(modify_hdr);
374         }
375 
376         flow_act->modify_hdr = modify_hdr;
377         flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
378         return 0;
379 }
380 
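/*
 * Fill the reformat parameters for ESP-in-transport encapsulation on
 * TX: an optional UDP header (for NAT-T encapsulation) followed by the
 * 4-byte SPI in network byte order.  param_0 is set to attrs->authsize,
 * presumably the ICV length the device needs for the reformat object.
 */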
381 static int
382 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
383 			     struct mlx5_pkt_reformat_params *reformat_params)
384 {
385 	struct udphdr *udphdr;
386 	size_t bfflen = 16;
387 	char *reformatbf;
388 	__be32 spi;
389 	void *hdr;
390 
391 	if (attrs->family == AF_INET) {
392 		if (attrs->encap)
393 			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
394 		else
395 			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
396 	} else {
397 		if (attrs->encap)
398 			reformat_params->type =
399 			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
400 		else
401 			reformat_params->type =
402 			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
403 	}
404 
405 	if (attrs->encap)
406 		bfflen += sizeof(*udphdr);
407 	reformatbf = kzalloc(bfflen, GFP_KERNEL);
408 	if (!reformatbf)
409 		return -ENOMEM;
410 
411 	hdr = reformatbf;
412 	if (attrs->encap) {
413 		udphdr = (struct udphdr *)reformatbf;
414 		udphdr->uh_sport = attrs->sport;
415 		udphdr->uh_dport = attrs->dport;
416 		hdr += sizeof(*udphdr);
417 	}
418 
419 	/* convert to network format */
420 	spi = htonl(attrs->spi);
421 	memcpy(hdr, &spi, 4);
422 
423 	reformat_params->param_0 = attrs->authsize;
424 	reformat_params->size = bfflen;
425 	reformat_params->data = reformatbf;
426 
427 	return 0;
428 }
429 
430 static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
431 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
432 			      struct mlx5_flow_act *flow_act)
433 {
434 	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
435 	struct mlx5_pkt_reformat_params reformat_params = {};
436 	struct mlx5_pkt_reformat *pkt_reformat;
437 	int ret;
438 
439 	if (attrs->dir == IPSEC_DIR_INBOUND) {
440 		if (attrs->encap)
441 			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
442 		else
443 			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
444 		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
445 		goto cmd;
446 	}
447 
448 	ret = setup_pkt_transport_reformat(attrs, &reformat_params);
449 	if (ret)
450 		return ret;
451 cmd:
452 	pkt_reformat =
453 		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
454 	if (reformat_params.data)
455 		kfree(reformat_params.data);
456 	if (IS_ERR(pkt_reformat))
457 		return PTR_ERR(pkt_reformat);
458 
459 	flow_act->pkt_reformat = pkt_reformat;
460 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
461 	return 0;
462 }
463 
464 static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
465                             __be32 *daddr)
466 {
467         spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
468 
469         MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
470         MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
471 
472         memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
473                             outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
474         memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
475                             outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
476         MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
477                          outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
478         MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
479                          outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
480 }
481 
482 static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
483                             __be32 *daddr)
484 {
485         spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
486 
487         MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
488         MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
489 
490         memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
491                             outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
492         memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
493                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
494         memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
495                             outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
496         memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
497                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
498 }
499 
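/*
 * Install the RX SA rule: match outer IP addresses, SPI (plus the ESP
 * protocol when not UDP-encapsulated), non-fragmented traffic and the
 * VLAN tag, then decrypt and count.  Matching packets are forwarded to
 * the status table unless the SA is marked to drop.
 */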
500 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
501 {
502 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
503 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
504 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
505 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
506 	struct mlx5_flow_destination dest[2] = {};
507 	struct mlx5_flow_act flow_act = {};
508 	struct mlx5_flow_handle *rule;
509 	struct mlx5_flow_spec *spec;
510 	struct mlx5e_ipsec_rx *rx;
511 	struct mlx5_fc *counter;
512 	int err;
513 
514 	rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
515 
516 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
517 	if (!spec)
518 		return -ENOMEM;
519 
520 	if (!attrs->drop) {
521 		err = setup_modify_header(mdev, sa_entry->kspi | BIT(31), IPSEC_DIR_INBOUND,
522 					  &flow_act);
523 		if (err)
524 			goto err_mod_header;
525 	}
526 
527 	err = setup_pkt_reformat(mdev, attrs, &flow_act);
528 	if (err)
529 		goto err_pkt_reformat;
530 
531 	counter = mlx5_fc_create(mdev, false);
532 	if (IS_ERR(counter)) {
533 		err = PTR_ERR(counter);
534 		goto err_add_cnt;
535 	}
536 
537 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
538 	flow_act.crypto.op = MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT;
539 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
540 	flow_act.flags |= FLOW_ACT_NO_APPEND;
541 
542 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
543 		MLX5_FLOW_CONTEXT_ACTION_COUNT;
544 
545 	if (attrs->drop)
546 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
547 	else
548 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
549 
550 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
551 	dest[0].ft = rx->ft.status;
552 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
553 	dest[1].counter_id = mlx5_fc_id(counter);
554 
555 	if (attrs->family == AF_INET)
556 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
557 	else
558 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
559 
560 	if (!attrs->encap)
561 		setup_fte_esp(spec);
562 
563 	setup_fte_spi(spec, attrs->spi, attrs->encap);
564 	setup_fte_no_frags(spec);
565 
566 	if (sa_entry->vid != VLAN_NONE)
567 		setup_fte_vid(spec, sa_entry->vid);
568 	else
569 		setup_fte_no_vid(spec);
570 
571 	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
572 	if (IS_ERR(rule)) {
573 		err = PTR_ERR(rule);
574 		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
575 		goto err_add_flow;
576 	}
577 	ipsec_rule->rule = rule;
578 
579 	/* Add another rule for zero vid */
580 	if (sa_entry->vid == VLAN_NONE) {
581 		clear_fte_vid(spec);
582 		setup_fte_vid(spec, 0);
583 		rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
584 		if (IS_ERR(rule)) {
585 			err = PTR_ERR(rule);
586 			mlx5_core_err(mdev,
587 			    "fail to add RX ipsec zero vid rule err=%d\n",
588 			    err);
589 			goto err_add_flow;
590 		}
591 		ipsec_rule->vid_zero_rule = rule;
592 	}
593 
594 	kvfree(spec);
595 	ipsec_rule->fc = counter;
596 	ipsec_rule->modify_hdr = flow_act.modify_hdr;
597 	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
598 	return 0;
599 
600 err_add_flow:
601 	mlx5_fc_destroy(mdev, counter);
602 	if (ipsec_rule->rule != NULL)
603 		mlx5_del_flow_rules(&ipsec_rule->rule);
604 err_add_cnt:
605 	mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
606 err_pkt_reformat:
607 	if (flow_act.modify_hdr != NULL)
608 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
609 err_mod_header:
610 	kvfree(spec);
611 
612 	return err;
613 }
614 
615 static struct mlx5_flow_table *ipsec_tx_ft_create(struct mlx5_flow_namespace *ns,
616 						  int level, int prio,
617 						  int max_num_groups)
618 {
619 	struct mlx5_flow_table_attr ft_attr = {};
620 
621         ft_attr.autogroup.num_reserved_entries = 1;
622         ft_attr.autogroup.max_num_groups = max_num_groups;
623         ft_attr.max_fte = NUM_IPSEC_FTE;
624         ft_attr.level = level;
625         ft_attr.prio = prio;
626 
627 	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
628 }
629 
630 static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
631 {
632 	struct mlx5_flow_destination dest = {};
633 	struct mlx5_flow_act flow_act = {};
634 	struct mlx5_flow_handle *fte;
635 	int err;
636 
637 	/* create fte */
638 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT |
639 		MLX5_FLOW_CONTEXT_ACTION_ALLOW;
640 
641 	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
642 	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
643 	fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
644 	if (IS_ERR_OR_NULL(fte)) {
645 		err = PTR_ERR(fte);
646 		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
647 		goto err_rule;
648 	}
649 
650 	tx->status.rule = fte;
651 	return 0;
652 
653 err_rule:
654 	return err;
655 }
656 
657 static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx) {
658 	if (!tx->roce.ft)
659 		return;
660 
661 	mlx5_del_flow_rules(&tx->roce.rule);
662 	mlx5_destroy_flow_group(tx->roce.g);
663 	mlx5_destroy_flow_table(tx->roce.ft);
664 	tx->roce.ft = NULL;
665 }
666 
667 /* IPsec TX flow steering */
668 static void tx_destroy(struct mlx5e_ipsec_tx *tx)
669 {
670 	tx_destroy_roce(tx);
671 	if (tx->chains) {
672 		ipsec_chains_destroy(tx->chains);
673 	} else {
674 		mlx5_del_flow_rules(&tx->pol.rule);
675 		mlx5_destroy_flow_group(tx->pol.group);
676 		mlx5_destroy_flow_table(tx->ft.pol);
677 	}
678 	mlx5_destroy_flow_table(tx->ft.sa);
679 	mlx5_del_flow_rules(&tx->kspi_miss.rule);
680 	mlx5_destroy_flow_group(tx->kspi_miss.group);
681 	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
682 	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
683 	mlx5_destroy_flow_table(tx->ft.sa_kspi);
684 	mlx5_del_flow_rules(&tx->status.rule);
685 	mlx5_destroy_flow_table(tx->ft.status);
686 }
687 
688 static int ipsec_tx_roce_rule_setup(struct mlx5_core_dev *mdev,
689 				    struct mlx5e_ipsec_tx *tx)
690 {
691 	struct mlx5_flow_destination dst = {};
692 	struct mlx5_flow_act flow_act = {};
693 	struct mlx5_flow_handle *rule;
694 	int err = 0;
695 
696 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
697 	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
698 	dst.ft = tx->ft.pol;
699 	rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1);
700 	if (IS_ERR(rule)) {
701 		err = PTR_ERR(rule);
702 		mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
703 			      err);
704 		goto out;
705 	}
706 	tx->roce.rule = rule;
707 
708 out:
709 	return err;
710 }
711 
712 static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
713 {
714 	struct mlx5_flow_table_attr ft_attr = {};
715 	struct mlx5_flow_table *ft;
716 	struct mlx5_flow_group *g;
717 	int ix = 0;
718 	int err;
719 	u32 *in;
720 
721 	if (!tx->roce.ns)
722 		return -EOPNOTSUPP;
723 
724 	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
725 	if (!in)
726 		return -ENOMEM;
727 
728 	ft_attr.max_fte = 1;
729 	ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr);
730 	if (IS_ERR(ft)) {
731 		err = PTR_ERR(ft);
732 		mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n",
733 			      err);
734 		goto fail_table;
735 	}
736 	tx->roce.ft = ft;
737 
738 	MLX5_SET_CFG(in, start_flow_index, ix);
739 	ix += 1;
740 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
741 	g = mlx5_create_flow_group(ft, in);
742 	if (IS_ERR(g)) {
743 		err = PTR_ERR(g);
744 		mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n",
745 			      err);
746 		goto fail_group;
747 	}
748 	tx->roce.g = g;
749 
750 	err = ipsec_tx_roce_rule_setup(mdev, tx);
751 	if (err) {
752 		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
753 		goto fail_rule;
754 	}
755 
756 	kvfree(in);
757 	return 0;
758 
759 fail_rule:
760 	mlx5_destroy_flow_group(tx->roce.g);
761 fail_group:
762 	mlx5_destroy_flow_table(tx->roce.ft);
763 	tx->roce.ft = NULL;
764 fail_table:
765 	kvfree(in);
766 	return err;
767 }
768 
769 /*
770  * Set rules in the KSPI table for traffic that should bypass IPsec.
771  *
772  * mdev - mlx5 core device
773  * tx - IPsec TX context
774  * return - 0 on success, errno on failure
775  */
776 static int tx_create_kspi_bypass_rules(struct mlx5_core_dev *mdev,
777                                        struct mlx5e_ipsec_tx *tx)
778 {
779 	struct mlx5_flow_destination dest = {};
780 	struct mlx5_flow_act flow_act = {};
781 	struct mlx5_flow_act flow_act_kspi = {};
782 	struct mlx5_flow_handle *rule;
783 	struct mlx5_flow_spec *spec;
784 	int err;
785 
786 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
787 	if (!spec)
788 		return -ENOMEM;
789 
790 	dest.ft = tx->ft.status;
791 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
792 	flow_act_kspi.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
793 
794 	setup_fte_reg_a_with_tag(spec, IPSEC_ACCEL_DRV_SPI_BYPASS);
795 	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi,
796 								&dest, 1);
797 	if (IS_ERR(rule)) {
798 		err = PTR_ERR(rule);
799 		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n",
800                       err);
801 		goto err_add_kspi_rule;
802 	}
803 	tx->kspi_bypass_rule.kspi_rule = rule;
804 
805 	/* Set the rule for packets without an IPsec tag. */
806 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
807 	memset(spec, 0, sizeof(*spec));
808 	setup_fte_reg_a_no_tag(spec);
809 	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1);
810 	if (IS_ERR(rule)) {
811 		err = PTR_ERR(rule);
812 		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n", err);
813 		goto err_add_rule;
814 	}
815 	tx->kspi_bypass_rule.rule = rule;
816 
817 	kvfree(spec);
818 	return 0;
819 err_add_rule:
820 	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
821 err_add_kspi_rule:
822 	kvfree(spec);
823 	return err;
824 }
825 
826 
827 static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
828 {
829 	struct mlx5_flow_destination dest = {};
830 	struct mlx5_flow_table *ft;
831 	int err;
832 
833 	/*
834 	 *  The Tx flow differs for Ethernet traffic and for RoCE packets.
835 	 *  Ethernet packets start in the SA KSPI table, which matches the KSPI
836 	 *  of an SA rule against the KSPI in the packet metadata.  RoCE traffic
837 	 *  starts in the Policy table, then moves to the SA table, which matches
838 	 *  either the reqid of the SA rule against the reqid reported by the
839 	 *  policy table, or the IP header fields of the SA against the packet IP
840 	 *  header fields.  Tables are ordered by level, so the KSPI table gets
841 	 *  level 0 and is hit first for Ethernet traffic.  For RoCE, the RoCE TX
842 	 *  table directs packets to the Policy table explicitly.
843 	 */
844 	ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4);
845 	if (IS_ERR(ft))
846 		return PTR_ERR(ft);
847 	tx->ft.sa_kspi = ft;
848 
849 	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4);
850 	if (IS_ERR(ft)) {
851 		err = PTR_ERR(ft);
852 		goto err_reqid_ft;
853 	}
854 	tx->ft.sa = ft;
855 
856 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
857 		tx->chains = ipsec_chains_create(
858 				mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1,
859 				&tx->ft.pol);
860 		if (IS_ERR(tx->chains)) {
861 			err = PTR_ERR(tx->chains);
862 			goto err_pol_ft;
863 		}
864 	} else {
865 		ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
866 		if (IS_ERR(ft)) {
867 			err = PTR_ERR(ft);
868 			goto err_pol_ft;
869 		}
870 		tx->ft.pol = ft;
871 		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
872 		dest.ft = tx->ft.sa;
873 		err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
874 		if (err)
875 			goto err_pol_miss;
876 	}
877 
878 	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1);
879 	if (IS_ERR(ft)) {
880 		err = PTR_ERR(ft);
881 		goto err_status_ft;
882 	}
883 	tx->ft.status = ft;
884 
885 	/* Set miss rule for the KSPI table with drop action. */
886 	err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL);
887 	if (err)
888 		goto err_kspi_miss;
889 
890 	err = tx_create_kspi_bypass_rules(mdev, tx);
891 	if (err)
892 		goto err_kspi_rule;
893 
894 	err = ipsec_counter_rule_tx(mdev, tx);
895 	if (err)
896 		goto err_status_rule;
897 
898 	err = ipsec_tx_create_roce(mdev, tx);
899 	if (err)
900 		goto err_counter_rule;
901 
902 	return 0;
903 
904 err_counter_rule:
905 	mlx5_del_flow_rules(&tx->status.rule);
906 err_status_rule:
907 	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
908 	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
909 err_kspi_rule:
910 	mlx5_destroy_flow_table(tx->ft.status);
911 err_status_ft:
912 	if (tx->chains) {
913 		ipsec_chains_destroy(tx->chains);
914 	} else {
915 		mlx5_del_flow_rules(&tx->pol.rule);
916 		mlx5_destroy_flow_group(tx->pol.group);
917 	}
918 err_pol_miss:
919 	if (!tx->chains)
920 		mlx5_destroy_flow_table(tx->ft.pol);
921 err_pol_ft:
922 	mlx5_del_flow_rules(&tx->kspi_miss.rule);
923 	mlx5_destroy_flow_group(tx->kspi_miss.group);
924 err_kspi_miss:
925 	mlx5_destroy_flow_table(tx->ft.sa);
926 err_reqid_ft:
927 	mlx5_destroy_flow_table(tx->ft.sa_kspi);
928 	return err;
929 }
930 
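/*
 * TX steering tables are created lazily and shared: tx_get()/tx_put()
 * keep a refcount under ft.mutex, creating the tables on first use and
 * destroying them when the last user is gone.
 */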
931 static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
932 		  struct mlx5e_ipsec_tx *tx)
933 {
934 	int err;
935 
936 	if (tx->ft.refcnt)
937 		goto skip;
938 
939 	err = tx_create(mdev, tx);
940 	if (err)
941 		return err;
942 
943 skip:
944 	tx->ft.refcnt++;
945 	return 0;
946 }
947 
948 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
949 {
950 	if (--tx->ft.refcnt)
951 		return;
952 
953 	tx_destroy(tx);
954 }
955 
956 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
957 					struct mlx5e_ipsec *ipsec)
958 {
959 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
960 	int err;
961 
962 	mutex_lock(&tx->ft.mutex);
963 	err = tx_get(mdev, ipsec, tx);
964 	mutex_unlock(&tx->ft.mutex);
965 	if (err)
966 		return ERR_PTR(err);
967 
968 	return tx;
969 }
970 
971 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
972                                                 struct mlx5e_ipsec *ipsec,
973                                                 u32 prio)
974 {
975         struct mlx5e_ipsec_tx *tx = ipsec->tx;
976         struct mlx5_flow_table *ft;
977         int err;
978 
979         mutex_lock(&tx->ft.mutex);
980         err = tx_get(mdev, ipsec, tx);
981         if (err)
982             goto err_get;
983 
984         ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
985         if (IS_ERR(ft)) {
986                 err = PTR_ERR(ft);
987                 goto err_get_ft;
988         }
989 
990         mutex_unlock(&tx->ft.mutex);
991         return ft;
992 
993 err_get_ft:
994         tx_put(ipsec, tx);
995 err_get:
996         mutex_unlock(&tx->ft.mutex);
997         return ERR_PTR(err);
998 }
999 
1000 static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
1001 {
1002         struct mlx5e_ipsec_tx *tx = ipsec->tx;
1003 
1004         mutex_lock(&tx->ft.mutex);
1005         if (tx->chains)
1006                 ipsec_chains_put_table(tx->chains, prio);
1007 
1008         tx_put(ipsec, tx);
1009         mutex_unlock(&tx->ft.mutex);
1010 }
1011 
1012 static void tx_ft_put(struct mlx5e_ipsec *ipsec)
1013 {
1014 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
1015 
1016 	mutex_lock(&tx->ft.mutex);
1017 	tx_put(ipsec, tx);
1018 	mutex_unlock(&tx->ft.mutex);
1019 }
1020 
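/*
 * Match the IPsec tag carried in metadata_reg_a:
 * MLX5_ETH_WQE_FT_META_IPSEC in the high bits and the 16-bit KSPI in
 * the low bits.
 */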
1021 static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
1022 									 u16 kspi)
1023 {
1024        /* Add IPsec indicator in metadata_reg_a. */
1025        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1026 
1027        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1028                         misc_parameters_2.metadata_reg_a);
1029        MLX5_SET(fte_match_param, spec->match_value,
1030                 misc_parameters_2.metadata_reg_a,
1031                 MLX5_ETH_WQE_FT_META_IPSEC << 23 |  kspi);
1032 }
1033 
1034 static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec)
1035 {
1036        /* Match packets without the IPsec indicator in metadata_reg_a. */
1037        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1038 
1039        MLX5_SET(fte_match_param, spec->match_criteria,
1040                 misc_parameters_2.metadata_reg_a,
1041 				MLX5_ETH_WQE_FT_META_IPSEC << 23);
1042        MLX5_SET(fte_match_param, spec->match_value,
1043                 misc_parameters_2.metadata_reg_a,
1044                 0);
1045 }
1046 
1047 static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
1048 {
1049 	/* Pass policy check before choosing this SA */
1050 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1051 
1052 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1053 			 misc_parameters_2.metadata_reg_c_0);
1054 	MLX5_SET(fte_match_param, spec->match_value,
1055 		 misc_parameters_2.metadata_reg_c_0, reqid);
1056 }
1057 
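/*
 * Match the upper-layer protocol from the policy selector: UDP or TCP,
 * with optional source/destination port matches.
 */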
1058 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1059 {
1060         switch (upspec->proto) {
1061         case IPPROTO_UDP:
1062                 if (upspec->dport) {
1063                         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
1064                                          spec->match_criteria, udp_dport);
1065                         MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1066                                  udp_dport, upspec->dport);
1067                 }
1068 
1069                 if (upspec->sport) {
1070                         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
1071                                          spec->match_criteria, udp_sport);
1072                         MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1073                                  udp_sport, upspec->sport);
1074                 }
1075                 break;
1076         case IPPROTO_TCP:
1077                 if (upspec->dport) {
1078                         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
1079                                          spec->match_criteria, tcp_dport);
1080                         MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1081                                  tcp_dport, upspec->dport);
1082                 }
1083 
1084                 if (upspec->sport) {
1085                         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
1086                                          spec->match_criteria, tcp_sport);
1087                         MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1088                                  tcp_sport, upspec->sport);
1089                 }
1090                 break;
1091         default:
1092                 return;
1093         }
1094 
1095         spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1096 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1097 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1098 }
1099 
1100 static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
1101 							struct mlx5e_ipsec_tx *tx,
1102 							struct mlx5_flow_act *flow_act,
1103 							struct mlx5_flow_destination *dest,
1104 							int num_dest)
1105 {
1106 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
1107 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1108 	struct mlx5_flow_handle *rule;
1109 	struct mlx5_flow_spec *spec;
1110 	int err;
1111 
1112 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1113 	if (!spec)
1114 		return -ENOMEM;
1115 
1116 	setup_fte_no_frags(spec);
1117 	setup_fte_reg_a_with_tag(spec, sa_entry->kspi);
1118 
1119 	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
1120 	if (IS_ERR(rule)) {
1121 		err = PTR_ERR(rule);
1122 		mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err);
1123 		goto err_add_kspi_flow;
1124 	}
1125 	ipsec_rule->kspi_rule = rule;
1126 	kvfree(spec);
1127 	return 0;
1128 
1129 err_add_kspi_flow:
1130 	kvfree(spec);
1131 	return err;
1132 }
1133 
1134 static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
1135 								struct mlx5e_ipsec_tx *tx,
1136 								struct mlx5_flow_act *flow_act,
1137 								struct mlx5_flow_destination *dest,
1138 								int num_dest)
1139 {
1140 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
1141 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1142 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1143 	struct mlx5_flow_handle *rule;
1144 	struct mlx5_flow_spec *spec;
1145 	int err;
1146 
1147 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1148 	if (!spec)
1149 		return -ENOMEM;
1150 
1151 	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1152 
1153 	if (attrs->reqid) {
1154 		setup_fte_no_frags(spec);
1155 		setup_fte_reg_c0(spec, attrs->reqid);
1156 		rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
1157 		if (IS_ERR(rule)) {
1158 			err = PTR_ERR(rule);
1159 			mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err);
1160 			goto err_add_reqid_rule;
1161 		}
1162 		ipsec_rule->reqid_rule = rule;
1163 		memset(spec, 0, sizeof(*spec));
1164 	}
1165 
1166 	if (attrs->family == AF_INET)
1167 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1168 	else
1169 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1170 	setup_fte_no_frags(spec);
1171 
1172 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
1173 	if (IS_ERR(rule)) {
1174 		err = PTR_ERR(rule);
1175 		mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err);
1176 		goto err_add_ip_rule;
1177 	}
1178 	ipsec_rule->rule = rule;
1179 	kvfree(spec);
1180 	return 0;
1181 
1182 err_add_ip_rule:
1183 	mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
1184 err_add_reqid_rule:
1185 	kvfree(spec);
1186 	return err;
1187 }
1188 
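/*
 * Install the TX SA rules: a KSPI rule in the sa_kspi table plus
 * reqid/IP rules in the SA table, all sharing one flow counter, one
 * packet-reformat action and the crypto-encrypt action.
 */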
1189 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1190 {
1191 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
1192 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1193 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1194 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1195 	struct mlx5_flow_destination dest[2] = {};
1196 	struct mlx5_flow_act flow_act = {};
1197 	struct mlx5e_ipsec_tx *tx;
1198 	struct mlx5_fc *counter;
1199 	int err;
1200 
1201 	tx = tx_ft_get(mdev, ipsec);
1202 	if (IS_ERR(tx))
1203 		return PTR_ERR(tx);
1204 
1205 	err = setup_pkt_reformat(mdev, attrs, &flow_act);
1206 	if (err)
1207 		goto err_pkt_reformat;
1208 
1209 	counter = mlx5_fc_create(mdev, false);
1210 	if (IS_ERR(counter)) {
1211 		err = PTR_ERR(counter);
1212 		goto err_add_cnt;
1213 	}
1214 
1215 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
1216         flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
1217         flow_act.flags |= FLOW_ACT_NO_APPEND;
1218         flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
1219                            MLX5_FLOW_CONTEXT_ACTION_COUNT;
1220 
1221 	if (attrs->drop)
1222 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1223 	else
1224 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1225 
1226 	dest[0].ft = tx->ft.status;
1227 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1228 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1229 	dest[1].counter_id = mlx5_fc_id(counter);
1230 
1231 	err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
1232 	if (err) {
1233 		goto err_add_kspi_rule;
1234 	}
1235 
1236 	err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);
1237 	if (err) {
1238 		goto err_add_reqid_ip_rule;
1239 	}
1240 
1241 	ipsec_rule->fc = counter;
1242 	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
1243 	return 0;
1244 
1245 err_add_reqid_ip_rule:
1246 	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
1247 err_add_kspi_rule:
1248 	mlx5_fc_destroy(mdev, counter);
1249 err_add_cnt:
1250 	if (flow_act.pkt_reformat)
1251 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
1252 err_pkt_reformat:
1253 	tx_ft_put(ipsec);
1254 	return err;
1255 }
1256 
1257 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1258 {
1259         struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1260         struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1261         struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
1262         struct mlx5_flow_destination dest[2] = {};
1263         struct mlx5_flow_act flow_act = {};
1264         struct mlx5_flow_handle *rule;
1265         struct mlx5_flow_spec *spec;
1266         struct mlx5_flow_table *ft;
1267         int err, dstn = 0;
1268 
1269         ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
1270         if (IS_ERR(ft))
1271             return PTR_ERR(ft);
1272 
1273         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1274         if (!spec) {
1275             err = -ENOMEM;
1276             goto err_alloc;
1277         }
1278 
1279         if (attrs->family == AF_INET)
1280                 setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1281         else
1282                 setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1283 
1284         setup_fte_no_frags(spec);
1285 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1286 
1287         switch (attrs->action) {
1288         case IPSEC_POLICY_IPSEC:
1289                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1290                 err = setup_modify_header(mdev, attrs->reqid,
1291                                           IPSEC_DIR_OUTBOUND, &flow_act);
1292                 if (err)
1293                         goto err_mod_header;
1294                  break;
1295         case IPSEC_POLICY_DISCARD:
1296                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1297                                    MLX5_FLOW_CONTEXT_ACTION_COUNT;
1298                 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1299                 dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
1300                 dstn++;
1301                 break;
1302         default:
1303                 err = -EINVAL;
1304                 goto err_mod_header;
1305         }
1306 
1307         flow_act.flags |= FLOW_ACT_NO_APPEND;
1308         dest[dstn].ft = tx->ft.sa;
1309         dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1310         dstn++;
1311         rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1312         if (IS_ERR(rule)) {
1313                 err = PTR_ERR(rule);
1314                 mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
1315                 goto err_action;
1316         }
1317 
1318         kvfree(spec);
1319         pol_entry->ipsec_rule.rule = rule;
1320         pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
1321         return 0;
1322 
1323 err_action:
1324         if (flow_act.modify_hdr)
1325                 mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
1326 err_mod_header:
1327         kvfree(spec);
1328 err_alloc:
1329         tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
1330         return err;
1331 }
1332 
1333 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1334 {
1335         struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1336         struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1337 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
1338         struct mlx5_flow_destination dest[2];
1339         struct mlx5_flow_act flow_act = {};
1340         struct mlx5_flow_handle *rule;
1341         struct mlx5_flow_spec *spec;
1342         struct mlx5_flow_table *ft;
1343         struct mlx5e_ipsec_rx *rx;
1344 	int err, dstn = 0;
1345 
1346         rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
1347         ft = rx->chains ? ipsec_chains_get_table(rx->chains, attrs->prio) : rx->ft.pol;
1348         if (IS_ERR(ft))
1349                 return PTR_ERR(ft);
1350 
1351         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1352         if (!spec) {
1353                 err = -ENOMEM;
1354                 goto err_alloc;
1355         }
1356 
1357         switch (attrs->action) {
1358         case IPSEC_POLICY_IPSEC:
1359                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1360                 break;
1361         case IPSEC_POLICY_DISCARD:
1362                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1363                 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1364                 dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
1365                 dstn++;
1366                 break;
1367         default:
1368                 err = -EINVAL;
1369                 goto err_action;
1370         }
1371 
1372         flow_act.flags |= FLOW_ACT_NO_APPEND;
1373         dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1374         dest[dstn].ft = rx->ft.sa;
1375         dstn++;
1376 
1377 	if (attrs->family == AF_INET)
1378 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1379 	else
1380 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1381 
1382 	setup_fte_no_frags(spec);
1383 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1384 	if (attrs->vid != VLAN_NONE)
1385 		setup_fte_vid(spec, attrs->vid);
1386 	else
1387 		setup_fte_no_vid(spec);
1388 
1389 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1390 	if (IS_ERR(rule)) {
1391 		err = PTR_ERR(rule);
1392 		mlx5_core_err(mdev,
1393 		    "Failed to add RX IPsec policy rule err=%d\n", err);
1394 		goto err_action;
1395 	}
1396 	pol_entry->ipsec_rule.rule = rule;
1397 
1398 	/* Also add a rule for zero vid. */
1399 	if (attrs->vid == VLAN_NONE) {
1400 		clear_fte_vid(spec);
1401 		setup_fte_vid(spec, 0);
1402 		rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1403 		if (IS_ERR(rule)) {
1404 			err = PTR_ERR(rule);
1405 			mlx5_core_err(mdev,
1406 			    "Failed to add RX IPsec policy rule err=%d\n",
1407 			    err);
1408 			goto err_action;
1409 		}
1410 		pol_entry->ipsec_rule.vid_zero_rule = rule;
1411 	}
1412 
1413 	kvfree(spec);
1414         return 0;
1415 
1416 err_action:
1417 	if (pol_entry->ipsec_rule.rule != NULL)
1418 		mlx5_del_flow_rules(&pol_entry->ipsec_rule.rule);
1419 	kvfree(spec);
1420 err_alloc:
1421         if (rx->chains != NULL)
1422                 ipsec_chains_put_table(rx->chains, attrs->prio);
1423         return err;
1424 }
1425 
1426 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
1427 {
1428 	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
1429 	struct mlx5_core_dev *mdev = ipsec->mdev;
1430 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
1431 
1432 	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
1433 	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
1434 	kfree(rx_ipv4->fc);
1435 	mlx5_fc_destroy(mdev, tx->fc->drop);
1436 	mlx5_fc_destroy(mdev, tx->fc->cnt);
1437 	kfree(tx->fc);
1438 }
1439 
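/*
 * Allocate the cnt/drop flow-counter pairs: one pair for TX and one
 * shared pair for RX (rx_ipv4 and rx_ipv6 point at the same struct, so
 * teardown releases the RX pair only once, through rx_ipv4).
 */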
1440 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
1441 {
1442 	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
1443 	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
1444 	struct mlx5_core_dev *mdev = ipsec->mdev;
1445 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
1446 	struct mlx5e_ipsec_fc *fc;
1447 	struct mlx5_fc *counter;
1448 	int err;
1449 
1450 	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
1451 	if (!fc)
1452 		return -ENOMEM;
1453 
1454 	tx->fc = fc;
1455 	counter = mlx5_fc_create(mdev, false);
1456 	if (IS_ERR(counter)) {
1457 		err = PTR_ERR(counter);
1458 		goto err_tx_fc_alloc;
1459 	}
1460 
1461 	fc->cnt = counter;
1462 	counter = mlx5_fc_create(mdev, false);
1463 	if (IS_ERR(counter)) {
1464 		err = PTR_ERR(counter);
1465 		goto err_tx_fc_cnt;
1466 	}
1467 
1468 	fc->drop = counter;
1469 
1470 	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
1471 	if (!fc) {
1472 		err = -ENOMEM;
1473 		goto err_tx_fc_drop;
1474 	}
1475 
1476 	/* Both IPv4 and IPv6 point to same flow counters struct. */
1477 	rx_ipv4->fc = fc;
1478 	rx_ipv6->fc = fc;
1479 	counter = mlx5_fc_create(mdev, false);
1480 	if (IS_ERR(counter)) {
1481 		err = PTR_ERR(counter);
1482 		goto err_rx_fc_alloc;
1483 	}
1484 
1485 	fc->cnt = counter;
1486 	counter = mlx5_fc_create(mdev, false);
1487 	if (IS_ERR(counter)) {
1488 		err = PTR_ERR(counter);
1489 		goto err_rx_fc_cnt;
1490 	}
1491 
1492 	fc->drop = counter;
1493 	return 0;
1494 
1495 err_rx_fc_cnt:
1496 	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
1497 err_rx_fc_alloc:
1498 	kfree(rx_ipv4->fc);
1499 err_tx_fc_drop:
1500 	mlx5_fc_destroy(mdev, tx->fc->drop);
1501 err_tx_fc_cnt:
1502 	mlx5_fc_destroy(mdev, tx->fc->cnt);
1503 err_tx_fc_alloc:
1504 	kfree(tx->fc);
1505 	return err;
1506 }
1507 
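/*
 * Default rule of the RX status table: copy the 7-bit ipsec_syndrome
 * into metadata REG_B[24:30] so the stack can check the decrypt result,
 * then count the packet and forward it to the final destination.
 */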
1508 static int ipsec_status_rule(struct mlx5_core_dev *mdev,
1509 			     struct mlx5e_ipsec_rx *rx,
1510 			     struct mlx5_flow_destination *dest)
1511 {
1512 	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1513 	struct mlx5_flow_act flow_act = {};
1514 	struct mlx5_modify_hdr *modify_hdr;
1515 	struct mlx5_flow_handle *rule;
1516 	struct mlx5_flow_spec *spec;
1517 	int err;
1518 
1519 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1520 	if (!spec)
1521 		return -ENOMEM;
1522 
1523 	/* Action to copy the 7-bit ipsec_syndrome to regB[24:30] */
1524 	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
1525 	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
1526 	MLX5_SET(copy_action_in, action, src_offset, 0);
1527 	MLX5_SET(copy_action_in, action, length, 7);
1528 	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1529 	MLX5_SET(copy_action_in, action, dst_offset, 24);
1530 
1531 	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1532 					      1, action);
1533 
1534 	if (IS_ERR(modify_hdr)) {
1535 		err = PTR_ERR(modify_hdr);
1536 		mlx5_core_err(mdev,
1537 			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
1538 		goto out_spec;
1539 	}
1540 
1541 	/* create fte */
1542 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1543 		MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1544 		MLX5_FLOW_CONTEXT_ACTION_COUNT;
1545 	flow_act.modify_hdr = modify_hdr;
1546 
1547 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
1548 	if (IS_ERR(rule)) {
1549 		err = PTR_ERR(rule);
1550 		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
1551 		goto out;
1552 	}
1553 
1554 	kvfree(spec);
1555 	rx->status.rule = rule;
1556 	rx->status.modify_hdr = modify_hdr;
1557 	return 0;
1558 
1559 out:
1560 	mlx5_modify_header_dealloc(mdev, modify_hdr);
1561 out_spec:
1562 	kvfree(spec);
1563 	return err;
1564 }
1565 
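/*
 * Tear down the RoCE RX steering rules in reverse order of creation.
 * A NULL ns_rdma means RoCE acceleration was never set up.
 */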
static void ipsec_fs_rx_roce_rules_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
	if (!rx_roce->ns_rdma)
		return;

	mlx5_del_flow_rules(&rx_roce->roce_miss.rule);
	mlx5_del_flow_rules(&rx_roce->rule);
	mlx5_destroy_flow_group(rx_roce->roce_miss.group);
	mlx5_destroy_flow_group(rx_roce->g);
}

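/*
 * Remove the catch-all (miss) rules from the RX SA, policy and status
 * tables.  The tables themselves stay in place until
 * ipsec_fs_rx_table_destroy().
 */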
static void ipsec_fs_rx_catchall_rules_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
	mutex_lock(&rx->ft.mutex);
	mlx5_del_flow_rules(&rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	if (rx->chains == NULL) {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
	}
	mlx5_del_flow_rules(&rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
	mutex_unlock(&rx->ft.mutex);
}

static void ipsec_fs_rx_roce_table_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
	if (!rx_roce->ns_rdma)
		return;

	mlx5_destroy_flow_table(rx_roce->ft_rdma);
	mlx5_destroy_flow_table(rx_roce->ft);
}

static void ipsec_fs_rx_table_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
	mutex_lock(&rx->ft.mutex);
	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_table(rx->ft.pol);
	}
	mlx5_destroy_flow_table(rx->ft.sa);
	mlx5_destroy_flow_table(rx->ft.status);
	ipsec_fs_rx_roce_table_destroy(&rx->roce);
	mutex_unlock(&rx->ft.mutex);
}

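/*
 * Match RoCE v2 traffic: UDP packets whose destination port is the
 * RoCE v2 port (ROCE_V2_UDP_DPORT, UDP port 4791).
 */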
static void ipsec_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}

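/*
 * Add the two rules of the RoCE RX table: a match rule that steers
 * RoCE v2 traffic to the RDMA flow table, and a miss rule that
 * forwards everything else to the regular RX path (default_dst).
 */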
static int ipsec_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst,
				    struct mlx5e_ipsec_rx_roce *roce, struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	ipsec_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->ft_rdma;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add RX roce ipsec rule err=%d\n",
			      err);
		goto fail_add_rule;
	}

	roce->rule = rule;

	rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, default_dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add RX roce ipsec miss rule err=%d\n",
			      err);
		goto fail_add_default_rule;
	}

	roce->roce_miss.rule = rule;

	kvfree(spec);
	return 0;

fail_add_default_rule:
	mlx5_del_flow_rules(&roce->rule);
fail_add_rule:
	kvfree(spec);
	return err;
}

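/*
 * Create the two single-entry flow groups of the RoCE RX table (the
 * UDP-dport match and the catch-all miss) and install their rules via
 * ipsec_roce_rx_rule_setup().
 */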
static int ipsec_roce_rx_rules(struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *defdst,
			       struct mlx5_core_dev *mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *outer_headers_c;
	u32 *in;
	int err = 0;
	int ix = 0;
	u8 *mc;

	if (!rx->roce.ns_rdma)
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(rx->roce.ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Failed to create ipsec rx roce group at nic err=%d\n", err);
		goto fail_group;
	}
	rx->roce.g = g;

	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(rx->roce.ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Failed to create ipsec rx roce miss group at nic err=%d\n",
			      err);
		goto fail_mgroup;
	}
	rx->roce.roce_miss.group = g;

	err = ipsec_roce_rx_rule_setup(defdst, &rx->roce, mdev);
	if (err)
		goto fail_setup_rule;

	kvfree(in);
	return 0;

fail_setup_rule:
	mlx5_destroy_flow_group(rx->roce.roce_miss.group);
fail_mgroup:
	mlx5_destroy_flow_group(rx->roce.g);
fail_group:
	kvfree(in);
	return err;
}

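/*
 * Populate the RX tables with their default rules: the RoCE rules,
 * the status-table rule that records the IPsec syndrome and counts
 * packets, the policy-table miss rule (when prio chains are not in
 * use) and the SA-table miss rule.
 */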
static int ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5_flow_destination *defdst)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	int err = 0;

	mutex_lock(&rx->ft.mutex);
	/* IPsec RoCE RX rules */
	err = ipsec_roce_rx_rules(rx, defdst, mdev);
	if (err)
		goto out;

	/* IPsec Rx IP Status table rule */
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	if (rx->roce.ft)
		dest[0].ft = rx->roce.ft;
	else
		dest[0].ft = priv->fts.vlan.t;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = ipsec_status_rule(mdev, rx, dest);
	if (err)
		goto err_roce_rules_destroy;

	if (!rx->chains) {
		/* IPsec Rx IP policy default miss rule */
		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, defdst);
		if (err)
			goto err_status_rule_destroy;
	}

	/*
	 * FIXME: This is a workaround for the current design, which
	 * installs the SA on the first packet, so that packet must be
	 * forwarded to the stack.  It does not work with RoCE and
	 * eswitch traffic.
	 */
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, defdst);
	if (err)
		goto err_status_sa_rule_destroy;

	mutex_unlock(&rx->ft.mutex);
	return 0;

err_status_sa_rule_destroy:
	if (!rx->chains) {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
	}
err_status_rule_destroy:
	mlx5_del_flow_rules(&rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_roce_rules_destroy:
	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
out:
	mutex_unlock(&rx->ft.mutex);
	return err;
}

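/*
 * Create the RoCE RX flow tables: one in the NIC RX namespace (two
 * entries, match and miss) and one in the RDMA RX namespace that the
 * match rule forwards to.
 */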
static int ipsec_fs_rx_roce_tables_create(struct mlx5e_ipsec_rx *rx,
					  int rx_init_level, int rdma_init_level)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err = 0;

	if (!rx->roce.ns_rdma)
		return 0;

	ft_attr.max_fte = 2;
	ft_attr.level = rx_init_level;
	ft = mlx5_create_flow_table(rx->ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	rx->roce.ft = ft;

	ft_attr.max_fte = 0;
	ft_attr.level = rdma_init_level;
	ft = mlx5_create_flow_table(rx->roce.ns_rdma, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}
	rx->roce.ft_rdma = ft;

	return 0;
out:
	mlx5_destroy_flow_table(rx->roce.ft);
	rx->roce.ft = NULL;
	return err;
}

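/*
 * Create the RX table hierarchy for one address family: policy table
 * (or prio chains when MLX5_IPSEC_CAP_PRIO is supported), SA table,
 * status table and, when available, the RoCE tables.  All levels are
 * derived from rx_init_level so the IPv4 and IPv6 hierarchies do not
 * collide.
 */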
static int ipsec_fs_rx_table_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx,
				    int rx_init_level, int rdma_init_level)
{
	struct mlx5_flow_namespace *ns = rx->ns;
	struct mlx5_flow_table *ft;
	int err = 0;

	mutex_lock(&rx->ft.mutex);

	/* IPsec Rx IP SA table create */
	ft = ipsec_rx_ft_create(ns, rx_init_level + 1, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}
	rx->ft.sa = ft;

	/* IPsec Rx IP Status table create */
	ft = ipsec_rx_ft_create(ns, rx_init_level + 2, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_table_destroy;
	}
	rx->ft.status = ft;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
				MLX5_FLOW_NAMESPACE_KERNEL, 0,
				rx_init_level, &rx->ft.pol);
		if (IS_ERR(rx->chains)) {
			err = PTR_ERR(rx->chains);
			goto err_status_table_destroy;
		}
	} else {
		ft = ipsec_rx_ft_create(ns, rx_init_level, 0, 1);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_status_table_destroy;
		}
		rx->ft.pol = ft;
	}

	/* IPsec RoCE RX tables create */
	err = ipsec_fs_rx_roce_tables_create(rx, rx_init_level + 3,
					     rdma_init_level);
	if (err)
		goto err_pol_table_destroy;

	goto out;

err_pol_table_destroy:
	if (rx->chains)
		ipsec_chains_destroy(rx->chains);
	else
		mlx5_destroy_flow_table(rx->ft.pol);
err_status_table_destroy:
	mlx5_destroy_flow_table(rx->ft.status);
err_sa_table_destroy:
	mlx5_destroy_flow_table(rx->ft.sa);
out:
	mutex_unlock(&rx->ft.mutex);
	return err;
}

#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)

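/*
 * Resolve the RDMA RX/TX IPsec namespaces.  RoCE acceleration is
 * skipped (ns_rdma stays NULL) when the device cannot steer between
 * the NIC and RDMA flow tables in both directions.
 */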
static void mlx5e_accel_ipsec_fs_init_roce(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns;

	if ((MLX5_CAP_GEN_2(ipsec->mdev, flow_table_type_2_type) &
	      NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
		mlx5_core_dbg(mdev, "Failed to init roce ns, capabilities not supported\n");
		return;
	}

	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to init roce rx ns\n");
		return;
	}

	ipsec->rx_ipv4->roce.ns_rdma = ns;
	ipsec->rx_ipv6->roce.ns_rdma = ns;

	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!ns) {
		ipsec->rx_ipv4->roce.ns_rdma = NULL;
		ipsec->rx_ipv6->roce.ns_rdma = NULL;
		mlx5_core_err(mdev, "Failed to init roce tx ns\n");
		return;
	}

	ipsec->tx->roce.ns = ns;
}

int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
		return tx_add_rule(sa_entry);

	return rx_add_rule(sa_entry);
}

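/*
 * Undo mlx5e_accel_ipsec_fs_add_rule(): delete every rule installed
 * for the SA, release its counter and packet-reformat context, then
 * drop the TX table reference (outbound) or the modify header
 * (inbound).
 */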
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(&ipsec_rule->rule);
	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
	if (ipsec_rule->vid_zero_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
	if (ipsec_rule->reqid_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND) {
		tx_ft_put(sa_entry->ipsec);
		return;
	}

	if (ipsec_rule->modify_hdr != NULL)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	if (pol_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
		return tx_add_policy(pol_entry);

	return rx_add_policy(pol_entry);
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(&ipsec_rule->rule);
	if (ipsec_rule->vid_zero_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);

	if (pol_entry->attrs.dir == IPSEC_DIR_INBOUND) {
		struct mlx5e_ipsec_rx *rx;

		rx = (pol_entry->attrs.family == AF_INET)
		    ? pol_entry->ipsec->rx_ipv4
		    : pol_entry->ipsec->rx_ipv6;
		if (rx->chains)
			ipsec_chains_put_table(rx->chains,
					       pol_entry->attrs.prio);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
}

void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv)
{
	/* Check if IPsec supported */
	if (!priv->ipsec)
		return;

	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
}

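/*
 * Install the catch-all rules for both address families with the VLAN
 * flow table as the default destination.  IPv6 is set up first so a
 * failure on IPv4 can unwind it.
 */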
int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_destination dest = {};
	int err = 0;

	/* Check if IPsec supported */
	if (!ipsec)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.vlan.t;
	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv6, &dest);
	if (err)
		goto out;

	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv4, &dest);
	if (err)
		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, ipsec->rx_ipv6);
out:
	return err;
}

void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	/* Check if IPsec supported */
	if (!ipsec)
		return;

	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv6);
	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv4);
}

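/*
 * Create both RX table hierarchies.  IPv4 starts at NIC RX level 0
 * and RDMA level 0, IPv6 at NIC RX level 4 and RDMA level 1, since
 * each family consumes four NIC RX levels.
 */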
int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	int err = 0;

	/* Check if IPsec supported */
	if (!ipsec)
		return 0;

	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv4, 0, 0);
	if (err)
		goto out;

	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv6, 4, 1);
	if (err) {
		ipsec_fs_rx_table_destroy(priv->mdev, ipsec->rx_ipv4);
		goto out;
	}

	priv->fts.ipsec_ft = priv->ipsec->rx_ipv4->ft.pol;
out:
	return err;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	WARN_ON(ipsec->tx->ft.refcnt);
	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	mutex_destroy(&ipsec->tx->ft.mutex);
	ipsec_fs_destroy_counters(ipsec);
	kfree(ipsec->rx_ipv6);
	kfree(ipsec->rx_ipv4);
	kfree(ipsec->tx);
}

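/*
 * Allocate the TX and per-family RX state, create the flow counters
 * and resolve the flow namespaces.  Returns -EOPNOTSUPP when the
 * device lacks the egress-IPsec or kernel RX namespace.
 * mlx5e_accel_ipsec_fs_cleanup() is the matching teardown.
 */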
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_flow_namespace *tns, *rns;
	int err = -ENOMEM;

	tns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!tns)
		return -EOPNOTSUPP;

	rns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!rns)
		return -EOPNOTSUPP;

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		return -ENOMEM;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_tx;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv4;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_rx_ipv6;

	ipsec->tx->ns = tns;
	mutex_init(&ipsec->tx->ft.mutex);
	ipsec->rx_ipv4->ns = rns;
	ipsec->rx_ipv6->ns = rns;
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);

	mlx5e_accel_ipsec_fs_init_roce(ipsec);

	return 0;

err_rx_ipv6:
	kfree(ipsec->rx_ipv6);
err_rx_ipv4:
	kfree(ipsec->rx_ipv4);
err_tx:
	kfree(ipsec->tx);
	return err;
}

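/*
 * Replace the steering rules of an SA make-before-break: install a
 * fresh set of rules from a shadow copy of the entry, then delete the
 * old rules and copy the shadow state back.  If installing the new
 * rules fails, the existing ones are left untouched.
 */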
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}