/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>
#include <net/ipv6_stubs.h>

#include "en.h"
#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"

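/* MLX5_IPSEC_RESCHED below is the period at which the SW-limits delayed
 * work re-checks lifetime expiry; MLX5E_IPSEC_TUNNEL_SA is the xarray
 * mark used to iterate only tunnel-mode SAs on neighbour events.
 */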
#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}

	if (x->km.state != XFRM_STATE_VALID) {
		spin_unlock_bh(&x->lock);
		return;
	}

	xfrm_state_check_expire(x);
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}

static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.dir) {
	case XFRM_DEV_OFFLOAD_IN:
		esn = x->replay_esn->seq;
		esn_msb = x->replay_esn->seq_hi;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		esn = x->replay_esn->oseq;
		esn_msb = x->replay_esn->oseq_hi;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	if (!x->replay_esn->replay_window) {
		seq_bottom = esn;
	} else {
		if (esn >= x->replay_esn->replay_window)
			seq_bottom = esn - x->replay_esn->replay_window + 1;

		if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
			esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
	}

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

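	/* MLX5E_IPSEC_ESN_SCOPE_MID (presumably the 2^31 midpoint of the
	 * 32-bit sequence space) splits the space in half; the replay window
	 * bottom crossing it in either direction flips the overlap bit, and
	 * returning true tells the caller to re-program the device with the
	 * new esn/esn_msb/overlap triplet.
	 */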
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of the hardware counter goes:
	 *         value        -> 2^31-1
	 *  2^31 | (2^31-1)     -> 2^31-1
	 *  2^31 | (2^31-1)     -> 2^31-1
	 *  [..]
	 *  2^31 | (2^31-1)     -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2**31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */
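	/* Illustrative numbers: hard_limit = 3 * 2^31 + 5 estimates n = 3,
	 * which the adjustment below lowers to n = 2, giving
	 * start_value = 2^31 + 5. The counter then runs two full 2^31 rounds
	 * plus the final countdown from 2^31 + 5, staying above 2^31 until
	 * the last round as required.
	 */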

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2**31 to get the interrupt.
	 */
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with compute_hard_n_value() the equations are not single-solution.
	 * The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */
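	/* Illustrative numbers, continuing the example above: with a hard
	 * start_value of 2^31 + 5 and numb_rounds_hard = 2, a
	 * soft_packet_limit of 2 * 2^31 + 2^30 first estimates n = 1 with a
	 * negative soft_value; the constraint check bumps n to 2 and the
	 * final soft_value becomes 2^30 + 5, inside the guard band above.
	 */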

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit and when we reach an interior iteration for soft_limit it
	 * has to be far away from 2**32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}

static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5e_ipsec_addr *addrs = &attrs->addrs;
	struct net_device *netdev = sa_entry->dev;
	struct xfrm_state *x = sa_entry->x;
	struct dst_entry *rt_dst_entry;
	struct flowi4 fl4 = {};
	struct flowi6 fl6 = {};
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	struct rtable *rt;
	const void *pkey;
	u8 *dst, *src;

	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	ether_addr_copy(addr, netdev->dev_addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		src = attrs->dmac;
		dst = attrs->smac;

		switch (addrs->family) {
		case AF_INET:
			fl4.flowi4_proto = x->sel.proto;
			fl4.daddr = addrs->saddr.a4;
			fl4.saddr = addrs->daddr.a4;
			pkey = &addrs->saddr.a4;
			break;
		case AF_INET6:
			fl6.flowi6_proto = x->sel.proto;
			memcpy(fl6.daddr.s6_addr32, addrs->saddr.a6, 16);
			memcpy(fl6.saddr.s6_addr32, addrs->daddr.a6, 16);
			pkey = &addrs->saddr.a6;
			break;
		default:
			return;
		}
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		src = attrs->smac;
		dst = attrs->dmac;
		switch (addrs->family) {
		case AF_INET:
			fl4.flowi4_proto = x->sel.proto;
			fl4.daddr = addrs->daddr.a4;
			fl4.saddr = addrs->saddr.a4;
			pkey = &addrs->daddr.a4;
			break;
		case AF_INET6:
			fl6.flowi6_proto = x->sel.proto;
			memcpy(fl6.daddr.s6_addr32, addrs->daddr.a6, 16);
			memcpy(fl6.saddr.s6_addr32, addrs->saddr.a6, 16);
			pkey = &addrs->daddr.a6;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);

	/* Destination can refer to a routed network, so perform FIB lookup
	 * to resolve nexthop and get its MAC. Neighbour resolution is used as
	 * fallback.
	 */
	switch (addrs->family) {
	case AF_INET:
		rt = ip_route_output_key(dev_net(netdev), &fl4);
		if (IS_ERR(rt))
			goto neigh;

		if (rt->rt_type != RTN_UNICAST) {
			ip_rt_put(rt);
			goto neigh;
		}
		rt_dst_entry = &rt->dst;
		break;
	case AF_INET6:
		if (!IS_ENABLED(CONFIG_IPV6) ||
		    ip6_dst_lookup(dev_net(netdev), NULL, &rt_dst_entry, &fl6))
			goto neigh;
		break;
	default:
		return;
	}

	n = dst_neigh_lookup(rt_dst_entry, pkey);
	if (!n) {
		dst_release(rt_dst_entry);
		goto neigh;
	}

	neigh_ha_snapshot(addr, n, netdev);
	ether_addr_copy(dst, addr);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT &&
	    is_zero_ether_addr(addr))
		neigh_event_send(n, NULL);
	dst_release(rt_dst_entry);
	neigh_release(n);
	return;

neigh:
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		neigh_event_send(n, NULL);
		attrs->drop = true;
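		/* Leave the SA in drop mode for now; once the neighbour
		 * resolves, mlx5e_ipsec_handle_netdev_event() copies the MAC,
		 * clears the drop flag and re-programs the flow steering rule.
		 */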
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}

static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
{
	/*
	 * State doesn't have subnet prefixes in outer headers.
	 * The match is performed for exact source/destination addresses.
	 */
	memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
	memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* iv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT ||
		    x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source, destination IPs */
	memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
	       sizeof(attrs->addrs.saddr));
	memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
	attrs->addrs.family = x->props.family;
	mlx5e_ipsec_state_mask(&attrs->addrs);
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
		    x->lft.hard_byte_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}

static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;

	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
}

static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;

	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return;

	if (x->props.flags & XFRM_STATE_ESN) {
		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
		return;
	}

	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
}

static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5e_ipsec_netevent_data *data = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &sa_entry->attrs;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		ether_addr_copy(attrs->smac, data->addr);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		ether_addr_copy(attrs->dmac, data->addr);
		break;
	default:
		WARN_ON_ONCE(true);
	}
	attrs->drop = false;
	mlx5e_accel_ipsec_fs_modify(sa_entry);
}

static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc_obj(*work);
	if (!work)
		return -ENOMEM;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		data = kzalloc_obj(*sa_entry);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc_obj(struct mlx5e_ipsec_netevent_data);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_dwork *dwork;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return 0;

	if (x->lft.soft_packet_limit == XFRM_INF &&
	    x->lft.hard_packet_limit == XFRM_INF &&
	    x->lft.soft_byte_limit == XFRM_INF &&
	    x->lft.hard_byte_limit == XFRM_INF)
		return 0;

	dwork = kzalloc_obj(*dwork);
	if (!dwork)
		return -ENOMEM;

	dwork->sa_entry = sa_entry;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
	sa_entry->dwork = dwork;
	return 0;
}

static int mlx5e_xfrm_add_state(struct net_device *dev,
				struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	bool allow_tunnel_mode = false;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(dev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc_obj(*sa_entry, gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->dev = dev;
	sa_entry->ipsec = ipsec;
	/* Check if this SA is originated from acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	err = mlx5_eswitch_block_mode(priv->mdev);
	if (err)
		goto unblock_ipsec;

	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
		allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
		if (!allow_tunnel_mode) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Packet offload tunnel mode is disabled due to encap settings");
			err = -EINVAL;
			goto unblock_mode;
		}
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = 1;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_encap;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	if (allow_tunnel_mode) {
		xa_lock_bh(&ipsec->sadb);
		__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			      MLX5E_IPSEC_TUNNEL_SA);
		xa_unlock_bh(&ipsec->sadb);
	}

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	if (allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(priv->mdev);

	mlx5_eswitch_unblock_mode(priv->mdev);

	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_encap:
	if (allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(priv->mdev);
unblock_mode:
	mlx5_eswitch_unblock_mode(priv->mdev);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);
}

static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}

static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_netevent_data *data;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec *ipsec;
	struct neighbour *n = ptr;
	unsigned long idx;

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
		attrs = &sa_entry->attrs;

		if (attrs->addrs.family == AF_INET) {
			if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
			    !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
				continue;
		} else {
			if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
			    !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
				continue;
		}

		data = sa_entry->work->data;

		neigh_ha_snapshot(data->addr, n, sa_entry->dev);
		queue_work(ipsec->wq, &sa_entry->work->work);
	}

	return NOTIFY_DONE;
}

void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc_obj(*ipsec);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	init_completion(&ipsec->comp);
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

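	/* work->data is a shadow mlx5e_ipsec_sa_entry allocated in
	 * mlx5_ipsec_create_work(); the rebuilt attrs are staged there and
	 * consumed by mlx5e_ipsec_modify_state() on the workqueue.
	 */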
	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}

static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct net *net = dev_net(x->xso.dev);
	u64 trailer_packets = 0, trailer_bytes = 0;
	u64 replay_packets = 0, replay_bytes = 0;
	u64 auth_packets = 0, auth_bytes = 0;
	u64 success_packets, success_bytes;
	u64 packets, bytes, lastuse;
	size_t headers;

	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
		       lockdep_is_held(&net->xfrm.xfrm_state_lock));

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
				     &auth_packets, &lastuse);
		x->stats.integrity_failed += auth_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);

		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
				     &trailer_packets, &lastuse);
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
	}

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
				     &replay_packets, &lastuse);
		x->stats.replay += replay_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
	}

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	success_packets = packets - auth_packets - trailer_packets - replay_packets;
	x->curlft.packets += success_packets;
	/* NIC counts all bytes passed through flow steering and doesn't have
	 * the ability to count payload data size, which is what the SA needs.
	 *
	 * To overcome this HW limitation, let's approximate the payload size
	 * by removing always available headers.
	 */
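	/* e.g. for an IPv4 SA, 14 (Ethernet) + 20 (IPv4) = 34 bytes are
	 * subtracted per counted packet below.
	 */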
	headers = sizeof(struct ethhdr);
	if (sa_entry->attrs.addrs.family == AF_INET)
		headers += sizeof(struct iphdr);
	else
		headers += sizeof(struct ipv6hdr);

	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
	x->curlft.bytes += success_bytes - headers * success_packets;
}

static __be32 word_to_mask(int prefix)
{
	if (prefix < 0)
		return 0;

	if (!prefix || prefix > 31)
		return cpu_to_be32(0xFFFFFFFF);

	return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
}
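/* e.g. word_to_mask(24) is the big-endian mask 0xffffff00. The IPv6
 * caller below passes prefix - 32 * i per 32-bit word, so negative
 * remainders yield an all-zero word and remainders above 31 an all-one
 * word.
 */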

static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
				    struct xfrm_selector *sel)
{
	int i;

	if (addrs->family == AF_INET) {
		addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
		addrs->saddr.a4 &= addrs->smask.m4;
		addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
		addrs->daddr.a4 &= addrs->dmask.m4;
		return;
	}

	for (i = 0; i < 4; i++) {
		if (sel->prefixlen_s != 32 * i)
			addrs->smask.m6[i] =
				word_to_mask(sel->prefixlen_s - 32 * i);
		addrs->saddr.a6[i] &= addrs->smask.m6[i];

		if (sel->prefixlen_d != 32 * i)
			addrs->dmask.m6[i] =
				word_to_mask(sel->prefixlen_d - 32 * i);
		addrs->daddr.a6[i] &= addrs->dmask.m6[i];
	}
}

static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
				      struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_selector *sel = &x->selector;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Please pay attention that we support only one template */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->priority) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
			return -EINVAL;
		}

		if (x->priority == U32_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
			return -EINVAL;
		}
	}

	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
		return -EINVAL;
	}

	return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
	memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
	attrs->addrs.family = sel->family;
	mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
	attrs->upspec.dport = ntohs(sel->dport);
	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
	attrs->upspec.sport = ntohs(sel->sport);
	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
	attrs->upspec.proto = sel->proto;
	attrs->prio = x->priority;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
	if (err)
		return err;

	pol_entry = kzalloc_obj(*pol_entry);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto ipsec_busy;
	}

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
ipsec_busy:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}

static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	kfree(pol_entry);
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add = mlx5e_xfrm_add_state,
	.xdo_dev_state_delete = mlx5e_xfrm_del_state,
	.xdo_dev_state_free = mlx5e_xfrm_free_state,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}