xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c (revision 033771c085c2ed73cb29dd25e1ec8c4b2991cad9)
1 /*
2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 
34 #include <crypto/internal/geniv.h>
35 #include <crypto/aead.h>
36 #include <linux/inetdevice.h>
37 #include <linux/netdevice.h>
38 #include <net/netevent.h>
39 
40 #include "en.h"
41 #include "eswitch.h"
42 #include "ipsec.h"
43 #include "ipsec_rxtx.h"
44 #include "en_rep.h"
45 
46 #define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
47 #define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1
48 
49 static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
50 {
51 	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
52 }
53 
54 static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
55 {
56 	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
57 }
58 
59 static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
60 {
61 	struct mlx5e_ipsec_dwork *dwork =
62 		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
63 	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
64 	struct xfrm_state *x = sa_entry->x;
65 
66 	if (sa_entry->attrs.drop)
67 		return;
68 
69 	spin_lock_bh(&x->lock);
70 	xfrm_state_check_expire(x);
71 	if (x->km.state == XFRM_STATE_EXPIRED) {
72 		sa_entry->attrs.drop = true;
73 		spin_unlock_bh(&x->lock);
74 
75 		mlx5e_accel_ipsec_fs_modify(sa_entry);
76 		return;
77 	}
78 	spin_unlock_bh(&x->lock);
79 
80 	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
81 			   MLX5_IPSEC_RESCHED);
82 }
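/* The delayed work above effectively polls the SA once per MLX5_IPSEC_RESCHED
 * (one second): each pass takes x->lock, lets the XFRM core run its lifetime
 * checks via xfrm_state_check_expire(), and only once the state reaches
 * XFRM_STATE_EXPIRED does it mark the SA to drop and push that change to the
 * flow-steering rule with mlx5e_accel_ipsec_fs_modify(). After drop is set,
 * the work is not rescheduled again (see the early return at the top).
 */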
83 
84 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
85 {
86 	struct xfrm_state *x = sa_entry->x;
87 	u32 seq_bottom = 0;
88 	u32 esn, esn_msb;
89 	u8 overlap;
90 
91 	switch (x->xso.type) {
92 	case XFRM_DEV_OFFLOAD_PACKET:
93 		switch (x->xso.dir) {
94 		case XFRM_DEV_OFFLOAD_IN:
95 			esn = x->replay_esn->seq;
96 			esn_msb = x->replay_esn->seq_hi;
97 			break;
98 		case XFRM_DEV_OFFLOAD_OUT:
99 			esn = x->replay_esn->oseq;
100 			esn_msb = x->replay_esn->oseq_hi;
101 			break;
102 		default:
103 			WARN_ON(true);
104 			return false;
105 		}
106 		break;
107 	case XFRM_DEV_OFFLOAD_CRYPTO:
108 		/* Already parsed by XFRM core */
109 		esn = x->replay_esn->seq;
110 		break;
111 	default:
112 		WARN_ON(true);
113 		return false;
114 	}
115 
116 	overlap = sa_entry->esn_state.overlap;
117 
118 	if (esn >= x->replay_esn->replay_window)
119 		seq_bottom = esn - x->replay_esn->replay_window + 1;
120 
121 	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
122 		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
123 
124 	if (sa_entry->esn_state.esn_msb)
125 		sa_entry->esn_state.esn = esn;
126 	else
127 		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
128 		 * the first packet sent using a given SA will contain a sequence
129 		 * number of 1.
130 		 */
131 		sa_entry->esn_state.esn = max_t(u32, esn, 1);
132 	sa_entry->esn_state.esn_msb = esn_msb;
133 
134 	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
135 		sa_entry->esn_state.overlap = 0;
136 		return true;
137 	} else if (unlikely(!overlap &&
138 			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
139 		sa_entry->esn_state.overlap = 1;
140 		return true;
141 	}
142 
143 	return false;
144 }
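/* Illustration (not part of the original source): the overlap bit tracks which
 * half of the 32-bit sequence space the bottom of the replay window sits in.
 * Assuming MLX5E_IPSEC_ESN_SCOPE_MID is roughly the midpoint of that space,
 * overlap stays 0 while seq_bottom is in the lower half; once seq_bottom
 * crosses into the upper half the function flips overlap to 1 and returns
 * true so the caller can push the new esn/esn_msb/overlap triplet to the
 * device, and when the low 32 bits wrap around again overlap drops back to 0
 * and another update is triggered.
 */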
145 
146 static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
147 				    struct mlx5_accel_esp_xfrm_attrs *attrs)
148 {
149 	struct xfrm_state *x = sa_entry->x;
150 	s64 start_value, n;
151 
152 	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
153 	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
154 	if (x->lft.soft_packet_limit == XFRM_INF)
155 		return;
156 
157 	/* Compute hard limit initial value and number of rounds.
158 	 *
159 	 * The hardware counter counts down in the following pattern:
160 	 *                value  -> 2^31-1
161 	 *      2^31  | (2^31-1) -> 2^31-1
162 	 *      2^31  | (2^31-1) -> 2^31-1
163 	 *      [..]
164 	 *      2^31  | (2^31-1) -> 0
165 	 *
166 	 * The pattern is created by using an ASO operation to atomically set
167 	 * bit 31 after the down counter clears bit 31. This is effectively an
168 	 * atomic addition of 2^31 to the counter.
169 	 *
170 	 * We wish to configure the counter, within the above pattern, so that
171 	 * when it reaches 0, it has hit the hard limit. This is defined by this
172 	 * system of equations:
173 	 *
174 	 *      hard_limit == start_value + n * 2^31
175 	 *      n >= 0
176 	 *      start_value < 2^32, start_value >= 0
177 	 *
178 	 * These equations do not have a single solution; there are often two choices:
179 	 *      hard_limit == start_value + n * 2^31
180 	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
181 	 *
182 	 * The algorithm selects the solution that keeps the counter value
183 	 * above 2^31 until the final iteration.
184 	 */
185 
186 	/* Start by estimating n and compute start_value */
187 	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
188 	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);
189 
190 	/* Choose the best of the two solutions: */
191 	if (n >= 1)
192 		n -= 1;
193 
194 	/* Computed values solve the system of equations: */
195 	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);
196 
197 	/* The best solution means: when there are multiple iterations we must
198 	 * start above 2^31 and count down to 2^31 to get the interrupt.
199 	 */
200 	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
201 	attrs->lft.numb_rounds_hard = (u64)n;
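	/* Worked example (illustrative numbers only): with a requested
	 * hard_packet_limit of 5 * 2^31 + 100, the first estimate gives n = 5;
	 * after the "n -= 1" adjustment n = 4 and start_value = 2^31 + 100.
	 * The counter is therefore programmed to 2^31 + 100 with 4 extra
	 * rounds of 2^31, so start_value + n * 2^31 equals the requested hard
	 * limit while the counter stays above 2^31 on every round but the
	 * last.
	 */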
202 
203 	/* Compute soft limit initial value and number of rounds.
204 	 *
205 	 * The soft_limit is achieved by adjusting the counter's
206 	 * interrupt_value. This is embedded in the counting pattern created by
207 	 * hard packet calculations above.
208 	 *
209 	 * We wish to compute the interrupt_value for the soft_limit. This is
210 	 * defined by this system of equations:
211 	 *
212 	 *      soft_limit == start_value - soft_value + n * 2^31
213 	 *      n >= 0
214 	 *      soft_value < 2^32, soft_value >= 0
215 	 *      for n == 0 start_value > soft_value
216 	 *
217 	 * As with the hard limit computation above, the equations do not have a single solution.
218 	 * The algorithm selects the solution that has:
219 	 *      2^30 <= soft_limit < 2^31 + 2^30
220 	 * for the interior iterations, which guarantees a large guard band
221 	 * around the counter hard limit and next interrupt.
222 	 */
223 
224 	/* Start by estimating n and compute soft_value */
225 	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
226 	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
227 		      x->lft.soft_packet_limit;
228 
229 	/* Compare against constraints and adjust n */
230 	if (n < 0)
231 		n = 0;
232 	else if (start_value >= BIT_ULL(32))
233 		n -= 1;
234 	else if (start_value < 0)
235 		n += 1;
236 
237 	/* Choose the best of the two solutions: */
238 	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
239 	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
240 		n += 1;
241 
242 	/* Note that the upper limit of soft_value happens naturally because we
243 	 * always select the lowest soft_value.
244 	 */
245 
246 	/* Computed values solve the system of equations: */
247 	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
248 
249 	/* The best solution means: when there are multiple iterations we must
250 	 * not fall below 2^30, as that would get too close to the false
251 	 * hard_limit; and when we reach an interior iteration for soft_limit,
252 	 * it has to stay far away from 2^32-1, which is the counter reset
253 	 * point after the +2^31, to accommodate latency.
254 	 */
255 	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
256 	attrs->lft.numb_rounds_soft = (u64)n;
257 }
258 
259 static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
260 				  struct mlx5_accel_esp_xfrm_attrs *attrs)
261 {
262 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
263 	struct xfrm_state *x = sa_entry->x;
264 	struct net_device *netdev;
265 	struct neighbour *n;
266 	u8 addr[ETH_ALEN];
267 	const void *pkey;
268 	u8 *dst, *src;
269 
270 	if (attrs->mode != XFRM_MODE_TUNNEL ||
271 	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
272 		return;
273 
274 	netdev = x->xso.real_dev;
275 
276 	mlx5_query_mac_address(mdev, addr);
277 	switch (attrs->dir) {
278 	case XFRM_DEV_OFFLOAD_IN:
279 		src = attrs->dmac;
280 		dst = attrs->smac;
281 		pkey = &attrs->saddr.a4;
282 		break;
283 	case XFRM_DEV_OFFLOAD_OUT:
284 		src = attrs->smac;
285 		dst = attrs->dmac;
286 		pkey = &attrs->daddr.a4;
287 		break;
288 	default:
289 		return;
290 	}
291 
292 	ether_addr_copy(src, addr);
293 	n = neigh_lookup(&arp_tbl, pkey, netdev);
294 	if (!n) {
295 		n = neigh_create(&arp_tbl, pkey, netdev);
296 		if (IS_ERR(n))
297 			return;
298 		neigh_event_send(n, NULL);
299 		attrs->drop = true;
300 	} else {
301 		neigh_ha_snapshot(addr, n, netdev);
302 		ether_addr_copy(dst, addr);
303 	}
304 	neigh_release(n);
305 }
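/* When the neighbour lookup above does not resolve, the SA is installed with
 * attrs->drop set, so traffic is dropped rather than sent with a bogus MAC.
 * The missing address is filled in later: mlx5e_ipsec_netevent_event() below
 * watches NETEVENT_NEIGH_UPDATE for tunnel-mode SAs, snapshots the resolved
 * hardware address and queues mlx5e_ipsec_handle_netdev_event(), which
 * updates smac/dmac, clears the drop flag and re-applies the flow-steering
 * rule.
 */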
306 
307 void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
308 					struct mlx5_accel_esp_xfrm_attrs *attrs)
309 {
310 	struct xfrm_state *x = sa_entry->x;
311 	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
312 	struct aead_geniv_ctx *geniv_ctx;
313 	struct crypto_aead *aead;
314 	unsigned int crypto_data_len, key_len;
315 	int ivsize;
316 
317 	memset(attrs, 0, sizeof(*attrs));
318 
319 	/* key */
320 	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
321 	key_len = crypto_data_len - 4; /* 4 bytes salt at end */
322 
323 	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
324 	aes_gcm->key_len = key_len * 8;
325 
326 	/* salt and seq_iv */
327 	aead = x->data;
328 	geniv_ctx = crypto_aead_ctx(aead);
329 	ivsize = crypto_aead_ivsize(aead);
330 	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
331 	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
332 	       sizeof(aes_gcm->salt));
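	/* Key material layout, for illustration: for rfc4106(gcm(aes)) the
	 * XFRM key blob is the AES key followed by a 4-byte salt, so an
	 * alg_key_len of 288 bits (256 + 32, as validated in
	 * mlx5e_xfrm_validate_state()) yields crypto_data_len = 36 bytes:
	 * a 32-byte AES-256 key plus the 4-byte salt at the end.
	 */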
333 
334 	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
335 
336 	/* icv len */
337 	aes_gcm->icv_len = x->aead->alg_icv_len;
338 
339 	attrs->dir = x->xso.dir;
340 
341 	/* esn */
342 	if (x->props.flags & XFRM_STATE_ESN) {
343 		attrs->replay_esn.trigger = true;
344 		attrs->replay_esn.esn = sa_entry->esn_state.esn;
345 		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
346 		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
347 		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
348 			goto skip_replay_window;
349 
350 		switch (x->replay_esn->replay_window) {
351 		case 32:
352 			attrs->replay_esn.replay_window =
353 				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
354 			break;
355 		case 64:
356 			attrs->replay_esn.replay_window =
357 				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
358 			break;
359 		case 128:
360 			attrs->replay_esn.replay_window =
361 				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
362 			break;
363 		case 256:
364 			attrs->replay_esn.replay_window =
365 				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
366 			break;
367 		default:
368 			WARN_ON(true);
369 			return;
370 		}
371 	}
372 
373 skip_replay_window:
374 	/* spi */
375 	attrs->spi = be32_to_cpu(x->id.spi);
376 
377 	/* source and destination IPs */
378 	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
379 	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
380 	attrs->family = x->props.family;
381 	attrs->type = x->xso.type;
382 	attrs->reqid = x->props.reqid;
383 	attrs->upspec.dport = ntohs(x->sel.dport);
384 	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
385 	attrs->upspec.sport = ntohs(x->sel.sport);
386 	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
387 	attrs->upspec.proto = x->sel.proto;
388 	attrs->mode = x->props.mode;
389 
390 	mlx5e_ipsec_init_limits(sa_entry, attrs);
391 	mlx5e_ipsec_init_macs(sa_entry, attrs);
392 
393 	if (x->encap) {
394 		attrs->encap = true;
395 		attrs->sport = x->encap->encap_sport;
396 		attrs->dport = x->encap->encap_dport;
397 	}
398 }
399 
400 static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
401 				     struct xfrm_state *x,
402 				     struct netlink_ext_ack *extack)
403 {
404 	if (x->props.aalgo != SADB_AALG_NONE) {
405 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
406 		return -EINVAL;
407 	}
408 	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
409 		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
410 		return -EINVAL;
411 	}
412 	if (x->props.calgo != SADB_X_CALG_NONE) {
413 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
414 		return -EINVAL;
415 	}
416 	if (x->props.flags & XFRM_STATE_ESN &&
417 	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
418 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
419 		return -EINVAL;
420 	}
421 	if (x->props.family != AF_INET &&
422 	    x->props.family != AF_INET6) {
423 		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
424 		return -EINVAL;
425 	}
426 	if (x->id.proto != IPPROTO_ESP) {
427 		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
428 		return -EINVAL;
429 	}
430 	if (x->encap) {
431 		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
432 			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
433 			return -EINVAL;
434 		}
435 
436 		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
437 			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
438 			return -EINVAL;
439 		}
440 
441 		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
442 			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
443 			return -EINVAL;
444 		}
445 
446 		if (x->props.mode != XFRM_MODE_TRANSPORT) {
447 			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
448 			return -EINVAL;
449 		}
450 	}
451 	if (!x->aead) {
452 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
453 		return -EINVAL;
454 	}
455 	if (x->aead->alg_icv_len != 128) {
456 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
457 		return -EINVAL;
458 	}
459 	if ((x->aead->alg_key_len != 128 + 32) &&
460 	    (x->aead->alg_key_len != 256 + 32)) {
461 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
462 		return -EINVAL;
463 	}
464 	if (x->tfcpad) {
465 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
466 		return -EINVAL;
467 	}
468 	if (!x->geniv) {
469 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
470 		return -EINVAL;
471 	}
472 	if (strcmp(x->geniv, "seqiv")) {
473 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
474 		return -EINVAL;
475 	}
476 
477 	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
478 	    x->sel.proto != IPPROTO_TCP) {
479 		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
480 		return -EINVAL;
481 	}
482 
483 	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
484 		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
485 		return -EINVAL;
486 	}
487 
488 	switch (x->xso.type) {
489 	case XFRM_DEV_OFFLOAD_CRYPTO:
490 		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
491 			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
492 			return -EINVAL;
493 		}
494 
495 		break;
496 	case XFRM_DEV_OFFLOAD_PACKET:
497 		if (!(mlx5_ipsec_device_caps(mdev) &
498 		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
499 			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
500 			return -EINVAL;
501 		}
502 
503 		if (x->props.mode == XFRM_MODE_TUNNEL &&
504 		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
505 			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
506 			return -EINVAL;
507 		}
508 
509 		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
510 		    x->replay_esn->replay_window != 32 &&
511 		    x->replay_esn->replay_window != 64 &&
512 		    x->replay_esn->replay_window != 128 &&
513 		    x->replay_esn->replay_window != 256) {
514 			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
515 			return -EINVAL;
516 		}
517 
518 		if (!x->props.reqid) {
519 			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
520 			return -EINVAL;
521 		}
522 
523 		if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
524 		    x->lft.hard_byte_limit != XFRM_INF) {
525 			/* XFRM stack doesn't prevent such configuration :(. */
526 			NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
527 			return -EINVAL;
528 		}
529 
530 		if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
531 			NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
532 			return -EINVAL;
533 		}
534 
535 		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
536 		    x->lft.hard_packet_limit != XFRM_INF) {
537 			/* XFRM stack doesn't prevent such configuration :(. */
538 			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
539 			return -EINVAL;
540 		}
541 
542 		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
543 			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
544 			return -EINVAL;
545 		}
546 		break;
547 	default:
548 		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
549 		return -EINVAL;
550 	}
551 	return 0;
552 }
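/* For illustration, a state that satisfies the checks above could be created
 * from user space with something like the following (assuming a reasonably
 * recent iproute2 with "offload packet" support; addresses, SPI, key and
 * device name are placeholders):
 *
 *   ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
 *       proto esp spi 0x12345678 reqid 1 mode transport \
 *       aead 'rfc4106(gcm(aes))' 0x6162636465666768696a6b6c6d6e6f7071727374 128 \
 *       offload packet dev eth2 dir out
 *
 * The AEAD key here is 20 bytes: a 128-bit AES key plus the 4-byte salt,
 * matching the 128 + 32 bit key length accepted above.
 */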
553 
554 static void mlx5e_ipsec_modify_state(struct work_struct *_work)
555 {
556 	struct mlx5e_ipsec_work *work =
557 		container_of(_work, struct mlx5e_ipsec_work, work);
558 	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
559 	struct mlx5_accel_esp_xfrm_attrs *attrs;
560 
561 	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;
562 
563 	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
564 }
565 
566 static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
567 {
568 	struct xfrm_state *x = sa_entry->x;
569 
570 	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
571 	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
572 		return;
573 
574 	if (x->props.flags & XFRM_STATE_ESN) {
575 		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
576 		return;
577 	}
578 
579 	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
580 }
581 
582 static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
583 {
584 	struct mlx5e_ipsec_work *work =
585 		container_of(_work, struct mlx5e_ipsec_work, work);
586 	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
587 	struct mlx5e_ipsec_netevent_data *data = work->data;
588 	struct mlx5_accel_esp_xfrm_attrs *attrs;
589 
590 	attrs = &sa_entry->attrs;
591 
592 	switch (attrs->dir) {
593 	case XFRM_DEV_OFFLOAD_IN:
594 		ether_addr_copy(attrs->smac, data->addr);
595 		break;
596 	case XFRM_DEV_OFFLOAD_OUT:
597 		ether_addr_copy(attrs->dmac, data->addr);
598 		break;
599 	default:
600 		WARN_ON_ONCE(true);
601 	}
602 	attrs->drop = false;
603 	mlx5e_accel_ipsec_fs_modify(sa_entry);
604 }
605 
606 static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
607 {
608 	struct xfrm_state *x = sa_entry->x;
609 	struct mlx5e_ipsec_work *work;
610 	void *data = NULL;
611 
612 	switch (x->xso.type) {
613 	case XFRM_DEV_OFFLOAD_CRYPTO:
614 		if (!(x->props.flags & XFRM_STATE_ESN))
615 			return 0;
616 		break;
617 	case XFRM_DEV_OFFLOAD_PACKET:
618 		if (x->props.mode != XFRM_MODE_TUNNEL)
619 			return 0;
620 		break;
621 	default:
622 		break;
623 	}
624 
625 	work = kzalloc(sizeof(*work), GFP_KERNEL);
626 	if (!work)
627 		return -ENOMEM;
628 
629 	switch (x->xso.type) {
630 	case XFRM_DEV_OFFLOAD_CRYPTO:
631 		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
632 		if (!data)
633 			goto free_work;
634 
635 		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
636 		break;
637 	case XFRM_DEV_OFFLOAD_PACKET:
638 		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
639 			       GFP_KERNEL);
640 		if (!data)
641 			goto free_work;
642 
643 		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
644 		break;
645 	default:
646 		break;
647 	}
648 
649 	work->data = data;
650 	work->sa_entry = sa_entry;
651 	sa_entry->work = work;
652 	return 0;
653 
654 free_work:
655 	kfree(work);
656 	return -ENOMEM;
657 }
658 
659 static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
660 {
661 	struct xfrm_state *x = sa_entry->x;
662 	struct mlx5e_ipsec_dwork *dwork;
663 
664 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
665 		return 0;
666 
667 	if (x->lft.soft_packet_limit == XFRM_INF &&
668 	    x->lft.hard_packet_limit == XFRM_INF &&
669 	    x->lft.soft_byte_limit == XFRM_INF &&
670 	    x->lft.hard_byte_limit == XFRM_INF)
671 		return 0;
672 
673 	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
674 	if (!dwork)
675 		return -ENOMEM;
676 
677 	dwork->sa_entry = sa_entry;
678 	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
679 	sa_entry->dwork = dwork;
680 	return 0;
681 }
682 
683 static int mlx5e_xfrm_add_state(struct xfrm_state *x,
684 				struct netlink_ext_ack *extack)
685 {
686 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
687 	struct net_device *netdev = x->xso.real_dev;
688 	struct mlx5e_ipsec *ipsec;
689 	struct mlx5e_priv *priv;
690 	gfp_t gfp;
691 	int err;
692 
693 	priv = netdev_priv(netdev);
694 	if (!priv->ipsec)
695 		return -EOPNOTSUPP;
696 
697 	ipsec = priv->ipsec;
698 	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
699 	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
700 	if (!sa_entry)
701 		return -ENOMEM;
702 
703 	sa_entry->x = x;
704 	sa_entry->ipsec = ipsec;
705 	/* Check if this SA originates from an acquire flow's temporary SA */
706 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
707 		goto out;
708 
709 	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
710 	if (err)
711 		goto err_xfrm;
712 
713 	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
714 		err = -EBUSY;
715 		goto err_xfrm;
716 	}
717 
718 	/* check esn */
719 	if (x->props.flags & XFRM_STATE_ESN)
720 		mlx5e_ipsec_update_esn_state(sa_entry);
721 
722 	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
723 
724 	err = mlx5_ipsec_create_work(sa_entry);
725 	if (err)
726 		goto unblock_ipsec;
727 
728 	err = mlx5e_ipsec_create_dwork(sa_entry);
729 	if (err)
730 		goto release_work;
731 
732 	/* create hw context */
733 	err = mlx5_ipsec_create_sa_ctx(sa_entry);
734 	if (err)
735 		goto release_dwork;
736 
737 	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
738 	if (err)
739 		goto err_hw_ctx;
740 
741 	if (x->props.mode == XFRM_MODE_TUNNEL &&
742 	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
743 	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
744 		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
745 		err = -EINVAL;
746 		goto err_add_rule;
747 	}
748 
749 	/* We use *_bh() variant because xfrm_timer_handler(), which runs
750 	 * in softirq context, can reach our state delete logic and we need
751 	 * xa_erase_bh() there.
752 	 */
753 	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
754 			   GFP_KERNEL);
755 	if (err)
756 		goto err_add_rule;
757 
758 	mlx5e_ipsec_set_esn_ops(sa_entry);
759 
760 	if (sa_entry->dwork)
761 		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
762 				   MLX5_IPSEC_RESCHED);
763 
764 	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
765 	    x->props.mode == XFRM_MODE_TUNNEL)
766 		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
767 			    MLX5E_IPSEC_TUNNEL_SA);
768 
769 out:
770 	x->xso.offload_handle = (unsigned long)sa_entry;
771 	return 0;
772 
773 err_add_rule:
774 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
775 err_hw_ctx:
776 	mlx5_ipsec_free_sa_ctx(sa_entry);
777 release_dwork:
778 	kfree(sa_entry->dwork);
779 release_work:
780 	if (sa_entry->work)
781 		kfree(sa_entry->work->data);
782 	kfree(sa_entry->work);
783 unblock_ipsec:
784 	mlx5_eswitch_unblock_ipsec(priv->mdev);
785 err_xfrm:
786 	kfree(sa_entry);
787 	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
788 	return err;
789 }
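/* Taken together, a successful add goes through: validate the state, block
 * eswitch mode changes, derive the offload attributes (ESN state, limits,
 * MACs), allocate the ESN/netevent work and the lifetime polling dwork,
 * create the hardware SA context, install the steering rule, and finally
 * publish the entry in ipsec->sadb so the datapath and notifiers can find
 * it. Each error label above unwinds exactly the steps completed before it.
 */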
790 
791 static void mlx5e_xfrm_del_state(struct xfrm_state *x)
792 {
793 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
794 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
795 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
796 	struct mlx5e_ipsec_sa_entry *old;
797 
798 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
799 		return;
800 
801 	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
802 	WARN_ON(old != sa_entry);
803 
804 	if (attrs->mode == XFRM_MODE_TUNNEL &&
805 	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
806 		/* Make sure that no ARP requests are running in parallel */
807 		flush_workqueue(ipsec->wq);
808 
809 }
810 
811 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
812 {
813 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
814 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
815 
816 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
817 		goto sa_entry_free;
818 
819 	if (sa_entry->work)
820 		cancel_work_sync(&sa_entry->work->work);
821 
822 	if (sa_entry->dwork)
823 		cancel_delayed_work_sync(&sa_entry->dwork->dwork);
824 
825 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
826 	mlx5_ipsec_free_sa_ctx(sa_entry);
827 	kfree(sa_entry->dwork);
828 	if (sa_entry->work)
829 		kfree(sa_entry->work->data);
830 	kfree(sa_entry->work);
831 	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
832 sa_entry_free:
833 	kfree(sa_entry);
834 }
835 
836 static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
837 				      unsigned long event, void *ptr)
838 {
839 	struct mlx5_accel_esp_xfrm_attrs *attrs;
840 	struct mlx5e_ipsec_netevent_data *data;
841 	struct mlx5e_ipsec_sa_entry *sa_entry;
842 	struct mlx5e_ipsec *ipsec;
843 	struct neighbour *n = ptr;
844 	struct net_device *netdev;
845 	struct xfrm_state *x;
846 	unsigned long idx;
847 
848 	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
849 		return NOTIFY_DONE;
850 
851 	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
852 	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
853 		attrs = &sa_entry->attrs;
854 
855 		if (attrs->family == AF_INET) {
856 			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
857 			    !neigh_key_eq32(n, &attrs->daddr.a4))
858 				continue;
859 		} else {
860 			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
861 			    !neigh_key_eq128(n, &attrs->daddr.a4))
862 				continue;
863 		}
864 
865 		x = sa_entry->x;
866 		netdev = x->xso.real_dev;
867 		data = sa_entry->work->data;
868 
869 		neigh_ha_snapshot(data->addr, n, netdev);
870 		queue_work(ipsec->wq, &sa_entry->work->work);
871 	}
872 
873 	return NOTIFY_DONE;
874 }
875 
876 void mlx5e_ipsec_init(struct mlx5e_priv *priv)
877 {
878 	struct mlx5e_ipsec *ipsec;
879 	int ret = -ENOMEM;
880 
881 	if (!mlx5_ipsec_device_caps(priv->mdev)) {
882 		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
883 		return;
884 	}
885 
886 	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
887 	if (!ipsec)
888 		return;
889 
890 	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
891 	ipsec->mdev = priv->mdev;
892 	init_completion(&ipsec->comp);
893 	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
894 				    priv->netdev->name);
895 	if (!ipsec->wq)
896 		goto err_wq;
897 
898 	if (mlx5_ipsec_device_caps(priv->mdev) &
899 	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
900 		ret = mlx5e_ipsec_aso_init(ipsec);
901 		if (ret)
902 			goto err_aso;
903 	}
904 
905 	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
906 		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
907 		ret = register_netevent_notifier(&ipsec->netevent_nb);
908 		if (ret)
909 			goto clear_aso;
910 	}
911 
912 	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
913 	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
914 	if (ret)
915 		goto err_fs_init;
916 
917 	ipsec->fs = priv->fs;
918 	priv->ipsec = ipsec;
919 	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
920 	return;
921 
922 err_fs_init:
923 	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
924 		unregister_netevent_notifier(&ipsec->netevent_nb);
925 clear_aso:
926 	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
927 		mlx5e_ipsec_aso_cleanup(ipsec);
928 err_aso:
929 	destroy_workqueue(ipsec->wq);
930 err_wq:
931 	kfree(ipsec);
932 	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
933 	return;
934 }
935 
936 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
937 {
938 	struct mlx5e_ipsec *ipsec = priv->ipsec;
939 
940 	if (!ipsec)
941 		return;
942 
943 	mlx5e_accel_ipsec_fs_cleanup(ipsec);
944 	if (ipsec->netevent_nb.notifier_call) {
945 		unregister_netevent_notifier(&ipsec->netevent_nb);
946 		ipsec->netevent_nb.notifier_call = NULL;
947 	}
948 	if (ipsec->aso)
949 		mlx5e_ipsec_aso_cleanup(ipsec);
950 	destroy_workqueue(ipsec->wq);
951 	kfree(ipsec);
952 	priv->ipsec = NULL;
953 }
954 
955 static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
956 {
957 	if (x->props.family == AF_INET) {
958 		/* Offload with IPv4 options is not supported yet */
959 		if (ip_hdr(skb)->ihl > 5)
960 			return false;
961 	} else {
962 		/* Offload with IPv6 extension headers is not supported yet */
963 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
964 			return false;
965 	}
966 
967 	return true;
968 }
969 
970 static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
971 {
972 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
973 	struct mlx5e_ipsec_work *work = sa_entry->work;
974 	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
975 	bool need_update;
976 
977 	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
978 	if (!need_update)
979 		return;
980 
981 	sa_entry_shadow = work->data;
982 	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
983 	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
984 	queue_work(sa_entry->ipsec->wq, &work->work);
985 }
986 
987 static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
988 {
989 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
990 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
991 	struct net *net = dev_net(x->xso.dev);
992 	u64 trailer_packets = 0, trailer_bytes = 0;
993 	u64 replay_packets = 0, replay_bytes = 0;
994 	u64 auth_packets = 0, auth_bytes = 0;
995 	u64 success_packets, success_bytes;
996 	u64 packets, bytes, lastuse;
997 	size_t headers;
998 
999 	lockdep_assert(lockdep_is_held(&x->lock) ||
1000 		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
1001 		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
1002 
1003 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
1004 		return;
1005 
1006 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
1007 		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
1008 				     &auth_packets, &lastuse);
1009 		x->stats.integrity_failed += auth_packets;
1010 		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
1011 
1012 		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
1013 				     &trailer_packets, &lastuse);
1014 		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
1015 	}
1016 
1017 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1018 		return;
1019 
1020 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
1021 		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
1022 				     &replay_packets, &lastuse);
1023 		x->stats.replay += replay_packets;
1024 		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
1025 	}
1026 
1027 	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
1028 	success_packets = packets - auth_packets - trailer_packets - replay_packets;
1029 	x->curlft.packets += success_packets;
1030 	/* The NIC counts all bytes passed through flow steering and has no
1031 	 * way to count the payload size, which is what SA accounting needs.
1032 	 *
1033 	 * To overcome this HW limitation, approximate the payload size by
1034 	 * subtracting the always-present headers.
1035 	 */
1036 	headers = sizeof(struct ethhdr);
1037 	if (sa_entry->attrs.family == AF_INET)
1038 		headers += sizeof(struct iphdr);
1039 	else
1040 		headers += sizeof(struct ipv6hdr);
1041 
1042 	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
1043 	x->curlft.bytes += success_bytes - headers * success_packets;
1044 }
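/* Worked example (illustrative numbers only): for an IPv4 SA whose steering
 * counter reports 1000 packets and 1,500,000 bytes with no authentication,
 * trailer or replay drops, headers = 14 (ethhdr) + 20 (iphdr) = 34 bytes,
 * so curlft.packets grows by 1000 and curlft.bytes by
 * 1,500,000 - 34 * 1000 = 1,466,000.
 */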
1045 
1046 static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
1047 				      struct xfrm_policy *x,
1048 				      struct netlink_ext_ack *extack)
1049 {
1050 	struct xfrm_selector *sel = &x->selector;
1051 
1052 	if (x->type != XFRM_POLICY_TYPE_MAIN) {
1053 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
1054 		return -EINVAL;
1055 	}
1056 
1057 	/* Note that only a single template is supported */
1058 	if (x->xfrm_nr > 1) {
1059 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
1060 		return -EINVAL;
1061 	}
1062 
1063 	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
1064 	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
1065 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
1066 		return -EINVAL;
1067 	}
1068 
1069 	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
1070 	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
1071 		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
1072 		return -EINVAL;
1073 	}
1074 
1075 	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
1076 		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
1077 		return -EINVAL;
1078 	}
1079 
1080 	if (x->selector.proto != IPPROTO_IP &&
1081 	    x->selector.proto != IPPROTO_UDP &&
1082 	    x->selector.proto != IPPROTO_TCP) {
1083 		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
1084 		return -EINVAL;
1085 	}
1086 
1087 	if (x->priority) {
1088 		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
1089 			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
1090 			return -EINVAL;
1091 		}
1092 
1093 		if (x->priority == U32_MAX) {
1094 			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
1095 			return -EINVAL;
1096 		}
1097 	}
1098 
1099 	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
1100 	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
1101 		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
1102 		return -EINVAL;
1103 	}
1104 
1105 	return 0;
1106 }
1107 
1108 static void
1109 mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
1110 				  struct mlx5_accel_pol_xfrm_attrs *attrs)
1111 {
1112 	struct xfrm_policy *x = pol_entry->x;
1113 	struct xfrm_selector *sel;
1114 
1115 	sel = &x->selector;
1116 	memset(attrs, 0, sizeof(*attrs));
1117 
1118 	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
1119 	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
1120 	attrs->family = sel->family;
1121 	attrs->dir = x->xdo.dir;
1122 	attrs->action = x->action;
1123 	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
1124 	attrs->reqid = x->xfrm_vec[0].reqid;
1125 	attrs->upspec.dport = ntohs(sel->dport);
1126 	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
1127 	attrs->upspec.sport = ntohs(sel->sport);
1128 	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
1129 	attrs->upspec.proto = sel->proto;
1130 	attrs->prio = x->priority;
1131 }
1132 
1133 static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
1134 				 struct netlink_ext_ack *extack)
1135 {
1136 	struct net_device *netdev = x->xdo.real_dev;
1137 	struct mlx5e_ipsec_pol_entry *pol_entry;
1138 	struct mlx5e_priv *priv;
1139 	int err;
1140 
1141 	priv = netdev_priv(netdev);
1142 	if (!priv->ipsec) {
1143 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
1144 		return -EOPNOTSUPP;
1145 	}
1146 
1147 	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
1148 	if (err)
1149 		return err;
1150 
1151 	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
1152 	if (!pol_entry)
1153 		return -ENOMEM;
1154 
1155 	pol_entry->x = x;
1156 	pol_entry->ipsec = priv->ipsec;
1157 
1158 	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
1159 		err = -EBUSY;
1160 		goto ipsec_busy;
1161 	}
1162 
1163 	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
1164 	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
1165 	if (err)
1166 		goto err_fs;
1167 
1168 	x->xdo.offload_handle = (unsigned long)pol_entry;
1169 	return 0;
1170 
1171 err_fs:
1172 	mlx5_eswitch_unblock_ipsec(priv->mdev);
1173 ipsec_busy:
1174 	kfree(pol_entry);
1175 	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
1176 	return err;
1177 }
1178 
1179 static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
1180 {
1181 	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
1182 
1183 	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
1184 	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
1185 }
1186 
1187 static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
1188 {
1189 	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
1190 
1191 	kfree(pol_entry);
1192 }
1193 
1194 static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
1195 	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
1196 	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
1197 	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
1198 	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
1199 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
1200 
1201 	.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
1202 	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
1203 	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
1204 	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
1205 };
1206 
1207 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
1208 {
1209 	struct mlx5_core_dev *mdev = priv->mdev;
1210 	struct net_device *netdev = priv->netdev;
1211 
1212 	if (!mlx5_ipsec_device_caps(mdev))
1213 		return;
1214 
1215 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
1216 
1217 	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
1218 	netdev->features |= NETIF_F_HW_ESP;
1219 	netdev->hw_enc_features |= NETIF_F_HW_ESP;
1220 
1221 	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
1222 		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
1223 		return;
1224 	}
1225 
1226 	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
1227 	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
1228 
1229 	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
1230 		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
1231 		return;
1232 	}
1233 
1234 	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
1235 	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
1236 	netdev->features |= NETIF_F_GSO_ESP;
1237 	netdev->hw_features |= NETIF_F_GSO_ESP;
1238 	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
1239 }
1240