/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ipsec.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/pfkeyv2.h>
#include <netipsec/key_var.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>

#define	MLX5_IPSEC_RESCHED	msecs_to_jiffies(1000)

static int mlx5e_if_sa_deinstall(struct ifnet *ifp, u_int dev_spi, void *priv);
static void mlx5e_if_sa_deinstall_onekey(struct ifnet *ifp, u_int dev_spi,
    void *priv);

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(void *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(void *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x;
}

static void
mlx5e_ipsec_handle_counters_onedir(struct mlx5e_ipsec_sa_entry *sa_entry,
    u64 *packets, u64 *bytes)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
}

static struct mlx5e_ipsec_sa_entry *
mlx5e_ipsec_other_sa_entry(struct mlx5e_ipsec_priv_bothdir *pb,
    struct mlx5e_ipsec_sa_entry *sa_entry)
{
	return (pb->priv_in == sa_entry ? pb->priv_out : pb->priv_in);
}

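/*
 * Periodic work: fold the hardware flow-counter statistics for both
 * directions of an offloaded SA into the stack's lifetime accounting,
 * then reschedule itself.
 */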
static void
mlx5e_ipsec_handle_counters(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
	    container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct mlx5e_ipsec_sa_entry *other_sa_entry;
	u64 bytes, bytes1, packets1, packets;

	if (sa_entry->attrs.drop)
		return;
	other_sa_entry = mlx5e_ipsec_other_sa_entry(dwork->pb, sa_entry);
	if (other_sa_entry == NULL || other_sa_entry->attrs.drop)
		return;

	mlx5e_ipsec_handle_counters_onedir(sa_entry, &packets, &bytes);
	mlx5e_ipsec_handle_counters_onedir(other_sa_entry, &packets1, &bytes1);
	packets += packets1;
	bytes += bytes1;

#ifdef IPSEC_OFFLOAD
	ipsec_accel_drv_sa_lifetime_update(sa_entry->savp, sa_entry->ifp,
	    sa_entry->kspi, bytes, packets);
#endif

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
	    MLX5_IPSEC_RESCHED);
}

static int
mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry,
    struct mlx5e_ipsec_priv_bothdir *pb)
{
	struct mlx5e_ipsec_dwork *dwork;

	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
	if (!dwork)
		return (ENOMEM);

	dwork->sa_entry = sa_entry;
	dwork->pb = pb;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_counters);
	sa_entry->dwork = dwork;
	return 0;
}

static int mlx5_xform_ah_authsize(const struct auth_hash *esph)
{
	int alen;

	if (esph == NULL)
		return 0;

	switch (esph->type) {
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		alen = esph->hashsize / 2;	/* RFC4868 2.3 */
		break;

	case CRYPTO_POLY1305:
	case CRYPTO_AES_NIST_GMAC:
		alen = esph->hashsize;
		break;

	default:
		alen = AH_HMAC_HASHLEN;
		break;
	}

	return alen;
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
    struct mlx5_accel_esp_xfrm_attrs *attrs, u8 dir)
{
	struct secasvar *savp = sa_entry->savp;
	const struct auth_hash *esph = savp->tdb_authalgxform;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct secasindex *saidx = &savp->sah->saidx;
	struct seckey *key_encap = savp->key_enc;
	int key_len;

	memset(attrs, 0, sizeof(*attrs));

	/* Subtract off the salt; RFC4106, 8.1 and RFC3686, 5.1. */
	key_len = _KEYLEN(key_encap) - SAV_ISCTRORGCM(savp) * 4 -
	    SAV_ISCHACHA(savp) * 4;

	memcpy(aes_gcm->aes_key, key_encap->key_data, key_len);
	aes_gcm->key_len = key_len;

	/* salt and seq_iv */
	aes_gcm->seq_iv = 0;
	memcpy(&aes_gcm->salt, key_encap->key_data + key_len,
	    sizeof(aes_gcm->salt));

	switch (savp->alg_enc) {
	case SADB_X_EALG_AESGCM8:
		attrs->authsize = 8 / 4;	/* in dwords */
		break;
	case SADB_X_EALG_AESGCM12:
		attrs->authsize = 12 / 4;	/* in dwords */
		break;
	case SADB_X_EALG_AESGCM16:
		attrs->authsize = 16 / 4;	/* in dwords */
		break;
	default:
		break;
	}

	/* ICV length; TBD: check whether the value makes sense. */
	aes_gcm->icv_len = mlx5_xform_ah_authsize(esph);

	attrs->dir = dir;
	/* SPI, in host byte order. */
	attrs->spi = ntohl(savp->spi);
	attrs->family = saidx->dst.sa.sa_family;
	attrs->reqid = saidx->reqid;

	if (saidx->src.sa.sa_family == AF_INET) {
		attrs->saddr.a4 = saidx->src.sin.sin_addr.s_addr;
		attrs->daddr.a4 = saidx->dst.sin.sin_addr.s_addr;
	} else {
		memcpy(&attrs->saddr.a6, &saidx->src.sin6.sin6_addr, 16);
		memcpy(&attrs->daddr.a6, &saidx->dst.sin6.sin6_addr, 16);
	}

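	/*
	 * NAT-T: for a UDP-encapsulated SA, hand the UDP source and
	 * destination ports to the device so it can build and match the
	 * ESP-in-UDP encapsulation.
	 */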
	if (savp->natt) {
		attrs->encap = true;
		attrs->sport = savp->natt->sport;
		attrs->dport = savp->natt->dport;
	}

	if (savp->flags & SADB_X_SAFLAGS_ESN) {
		/* We support a replay window only together with ESN. */
		attrs->replay_esn.trigger = true;
		if (sa_entry->esn_state.esn_msb)
			attrs->replay_esn.esn = sa_entry->esn_state.esn;
		else
			/*
			 * According to RFC4303, section "3.3.3. Sequence
			 * Number Generation", the first packet sent using a
			 * given SA will contain a sequence number of 1.
			 */
			attrs->replay_esn.esn = max_t(u32,
			    sa_entry->esn_state.esn, 1);
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;

		if (savp->replay) {
			switch (savp->replay->wsize) {
			case 4:
				attrs->replay_esn.replay_window =
				    MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
				break;
			case 8:
				attrs->replay_esn.replay_window =
				    MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
				break;
			case 16:
				attrs->replay_esn.replay_window =
				    MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
				break;
			case 32:
				attrs->replay_esn.replay_window =
				    MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
				break;
			default:
				/* Do nothing. */
				break;
			}
		}
	}
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
    struct secasvar *savp)
{
	struct secasindex *saidx = &savp->sah->saidx;
	struct seckey *key_encp = savp->key_enc;
	int keylen;

	if (!(mlx5_ipsec_device_caps(mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		mlx5_core_err(mdev, "FULL offload is not supported\n");
		return (EINVAL);
	}
	if (savp->alg_enc == SADB_EALG_NONE) {
		mlx5_core_err(mdev,
		    "Cannot offload authenticated xfrm states\n");
		return (EINVAL);
	}
	if (savp->alg_enc != SADB_X_EALG_AESGCM16) {
		mlx5_core_err(mdev,
		    "Only IPSec aes-gcm-16 encryption protocol may be offloaded\n");
		return (EINVAL);
	}
	if (savp->tdb_compalgxform) {
		mlx5_core_err(mdev, "Cannot offload compressed xfrm states\n");
		return (EINVAL);
	}
	if (savp->alg_auth != SADB_X_AALG_AES128GMAC &&
	    savp->alg_auth != SADB_X_AALG_AES256GMAC) {
		mlx5_core_err(mdev,
		    "Cannot offload xfrm states with AEAD key length other than 128/256 bits\n");
		return (EINVAL);
	}
	if ((saidx->dst.sa.sa_family != AF_INET &&
	    saidx->dst.sa.sa_family != AF_INET6) ||
	    (saidx->src.sa.sa_family != AF_INET &&
	    saidx->src.sa.sa_family != AF_INET6)) {
		mlx5_core_err(mdev,
		    "Only IPv4/6 xfrm states may be offloaded\n");
		return (EINVAL);
	}
	if (saidx->proto != IPPROTO_ESP) {
		mlx5_core_err(mdev, "Only ESP xfrm state may be offloaded\n");
		return (EINVAL);
	}
	/* Subtract off the salt; RFC4106, 8.1 and RFC3686, 5.1. */
	keylen = _KEYLEN(key_encp) - SAV_ISCTRORGCM(savp) * 4 -
	    SAV_ISCHACHA(savp) * 4;
	if (keylen != 128 / 8 && keylen != 256 / 8) {
		mlx5_core_err(mdev,
		    "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return (EINVAL);
	}

	if (saidx->mode != IPSEC_MODE_TRANSPORT) {
		mlx5_core_err(mdev,
		    "Only transport xfrm states may be offloaded in full offload mode\n");
		return (EINVAL);
	}

	if (savp->natt) {
		if (!(mlx5_ipsec_device_caps(mdev) &
		    MLX5_IPSEC_CAP_ESPINUDP)) {
			mlx5_core_err(mdev, "Encapsulation is not supported\n");
			return (EINVAL);
		}
	}

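	/*
	 * The ASO replay-protection context supports fixed window sizes
	 * only; wsize counts bytes of the replay bitmap, so 4/8/16/32 map
	 * to the 32/64/128/256-bit windows accepted above.
	 */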
	if (savp->replay && savp->replay->wsize != 0 &&
	    savp->replay->wsize != 4 && savp->replay->wsize != 8 &&
	    savp->replay->wsize != 16 && savp->replay->wsize != 32) {
		mlx5_core_err(mdev, "Unsupported replay window size %d\n",
		    savp->replay->wsize);
		return (EINVAL);
	}

	if ((savp->flags & SADB_X_SAFLAGS_ESN) != 0) {
		if ((mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN) == 0) {
			mlx5_core_err(mdev, "ESN is not supported\n");
			return (EINVAL);
		}
	} else if (savp->replay != NULL && savp->replay->wsize != 0) {
		mlx5_core_warn(mdev,
		    "non-ESN but replay-protect SA offload is not supported\n");
		return (EINVAL);
	}
	return 0;
}

static int
mlx5e_if_sa_newkey_onedir(struct ifnet *ifp, void *sav, int dir,
    u_int drv_spi, struct mlx5e_ipsec_sa_entry **privp,
    struct mlx5e_ipsec_priv_bothdir *pb)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct mlx5e_priv *priv = if_getsoftc(ifp);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	int err;

	if (priv->gone != 0 || ipsec == NULL)
		return (EOPNOTSUPP);

	err = mlx5e_xfrm_validate_state(mdev, sav);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (sa_entry == NULL)
		return (ENOMEM);

	sa_entry->kspi = drv_spi;
	sa_entry->savp = sav;
	sa_entry->ifp = ifp;
	sa_entry->ipsec = ipsec;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs, dir);

	err = mlx5e_ipsec_create_dwork(sa_entry, pb);
	if (err)
		goto err_xfrm;

	/* Create the hw context. */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto err_sa_ctx;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_fs;

	*privp = sa_entry;
	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
		    MLX5_IPSEC_RESCHED);

	err = xa_insert(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id, sa_entry,
	    GFP_KERNEL);
	if (err)
		goto err_xa;

	return 0;

err_xa:
	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_fs:
	mlx5_ipsec_free_sa_ctx(sa_entry);
err_sa_ctx:
	kfree(sa_entry->dwork);
err_xfrm:
	kfree(sa_entry);
	mlx5_en_err(ifp, "Device failed to offload this state\n");
	return err;
}

static int
mlx5e_if_sa_newkey(struct ifnet *ifp, void *sav, u_int dev_spi, void **privp)
{
	struct mlx5e_ipsec_priv_bothdir *pb;
	int error;

	pb = malloc(sizeof(struct mlx5e_ipsec_priv_bothdir), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	error = mlx5e_if_sa_newkey_onedir(ifp, sav, IPSEC_DIR_INBOUND,
	    dev_spi, &pb->priv_in, pb);
	if (error != 0) {
		free(pb, M_DEVBUF);
		return (error);
	}
	error = mlx5e_if_sa_newkey_onedir(ifp, sav, IPSEC_DIR_OUTBOUND,
	    dev_spi, &pb->priv_out, pb);
	if (error == 0) {
		*privp = pb;
	} else {
		/*
		 * Only the inbound direction was installed; undo it
		 * directly.  mlx5e_if_sa_deinstall() expects the
		 * two-direction wrapper, not a single SA entry.
		 */
		if (pb->priv_in->dwork != NULL)
			cancel_delayed_work_sync(&pb->priv_in->dwork->dwork);
		mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb->priv_in);
		free(pb, M_DEVBUF);
	}
	return (error);
}

static void
mlx5e_if_sa_deinstall_onekey(struct ifnet *ifp, u_int dev_spi, void *priv)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec_sa_entry *old;

	old = xa_erase(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	kfree(sa_entry);
}

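/*
 * Tear down both directions of an offloaded SA.  The counter work is
 * cancelled first so it cannot run against entries that are being freed.
 */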
static int
mlx5e_if_sa_deinstall(struct ifnet *ifp, u_int dev_spi, void *priv)
{
	struct mlx5e_ipsec_priv_bothdir pb, *pbp;

	pbp = priv;
	pb = *(struct mlx5e_ipsec_priv_bothdir *)priv;
	pbp->priv_in = pbp->priv_out = NULL;

	if (pb.priv_in->dwork != NULL)
		cancel_delayed_work_sync(&pb.priv_in->dwork->dwork);
	if (pb.priv_out->dwork != NULL)
		cancel_delayed_work_sync(&pb.priv_out->dwork->dwork);

	mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_in);
	mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_out);
	free(pbp, M_DEVBUF);
	return (0);
}

static void
mlx5e_if_sa_cnt_one(struct ifnet *ifp, void *sa, uint32_t drv_spi,
    void *priv, u64 *bytes, u64 *packets)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
}

static int
mlx5e_if_sa_cnt(struct ifnet *ifp, void *sa, uint32_t drv_spi,
    void *priv, struct seclifetime *lt)
{
	struct mlx5e_ipsec_priv_bothdir *pb;
	u64 packets_in, packets_out;
	u64 bytes_in, bytes_out;

	pb = priv;
	mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_in,
	    &bytes_in, &packets_in);
	mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_out,
	    &bytes_out, &packets_out);
	/* TODO: remove this casting once Kostia changes allocation type to be u64 */
	lt->bytes = bytes_in + bytes_out;
	lt->allocations = (uint32_t)(packets_in + packets_out);
	return (0);
}

static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
    struct secpolicy *sp, struct inpcb *inp)
{
	struct secpolicyindex *spidx = &sp->spidx;

	if (!(mlx5_ipsec_device_caps(mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		mlx5_core_err(mdev, "FULL offload is not supported\n");
		return (EINVAL);
	}

	if (sp->tcount > 1) {
		mlx5_core_err(mdev, "Can offload exactly one template, "
		    "not %d\n", sp->tcount);
		return (EINVAL);
	}

	if (sp->policy == IPSEC_POLICY_BYPASS &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
		mlx5_core_err(mdev,
		    "Device does not support policy priority\n");
		return (EINVAL);
	}

	if (sp->tcount > 0 && inp != NULL) {
		mlx5_core_err(mdev, "Invalid input data\n");
		return (EINVAL);
	}

	if (spidx->dir != IPSEC_DIR_INBOUND &&
	    spidx->dir != IPSEC_DIR_OUTBOUND) {
		mlx5_core_err(mdev, "Wrong policy direction\n");
		return (EINVAL);
	}

	if (sp->tcount > 0 && sp->req[0]->saidx.mode != IPSEC_MODE_TRANSPORT) {
		mlx5_core_err(mdev, "Device supports transport mode only\n");
		return (EINVAL);
	}

	if (sp->policy != IPSEC_POLICY_DISCARD &&
	    sp->policy != IPSEC_POLICY_IPSEC &&
	    sp->policy != IPSEC_POLICY_BYPASS) {
		mlx5_core_err(mdev,
		    "Offloaded policy must be specific on its action\n");
		return (EINVAL);
	}

	if (sp->policy == IPSEC_POLICY_BYPASS && !inp) {
		mlx5_core_err(mdev,
		    "Missing port information for IKE bypass\n");
		return (EINVAL);
	}

	if (inp != NULL) {
		INP_RLOCK(inp);
		if (inp->inp_socket == NULL || inp->inp_socket->so_proto->
		    pr_protocol != IPPROTO_UDP) {
			mlx5_core_err(mdev,
			    "Unsupported IKE bypass protocol %d\n",
			    inp->inp_socket == NULL ? -1 :
			    inp->inp_socket->so_proto->pr_protocol);
			INP_RUNLOCK(inp);
			return (EINVAL);
		}
		INP_RUNLOCK(inp);
	}

	/* TODO fill relevant bits */
	return 0;
}

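/*
 * Translate either an SPD entry (inp == NULL) or a PCB-attached IKE
 * bypass policy into the flow-steering attributes consumed by the
 * device; PCB policies are installed with the higher priority.
 */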
static void mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
    struct mlx5_accel_pol_xfrm_attrs *attrs, struct inpcb *inp)
{
	struct secpolicy *sp = pol_entry->sp;
	struct secpolicyindex *spidx = &sp->spidx;

	memset(attrs, 0, sizeof(*attrs));

	if (!inp) {
		if (spidx->src.sa.sa_family == AF_INET) {
			attrs->saddr.a4 = spidx->src.sin.sin_addr.s_addr;
			attrs->daddr.a4 = spidx->dst.sin.sin_addr.s_addr;
		} else if (spidx->src.sa.sa_family == AF_INET6) {
			memcpy(&attrs->saddr.a6, &spidx->src.sin6.sin6_addr, 16);
			memcpy(&attrs->daddr.a6, &spidx->dst.sin6.sin6_addr, 16);
		} else {
			KASSERT(0, ("unsupported family %d",
			    spidx->src.sa.sa_family));
		}
		attrs->family = spidx->src.sa.sa_family;
		attrs->prio = 0;
		attrs->action = sp->policy;
		attrs->reqid = sp->req[0]->saidx.reqid;
	} else {
		INP_RLOCK(inp);
		if ((inp->inp_vflag & INP_IPV4) != 0) {
			attrs->saddr.a4 = inp->inp_laddr.s_addr;
			attrs->daddr.a4 = inp->inp_faddr.s_addr;
			attrs->family = AF_INET;
		} else if ((inp->inp_vflag & INP_IPV6) != 0) {
			memcpy(&attrs->saddr.a6, &inp->in6p_laddr, 16);
			memcpy(&attrs->daddr.a6, &inp->in6p_faddr, 16);
			attrs->family = AF_INET6;
		} else {
			KASSERT(0, ("unsupported family %d", inp->inp_vflag));
		}
		attrs->upspec.dport = inp->inp_fport;
		attrs->upspec.sport = inp->inp_lport;
		attrs->upspec.proto = inp->inp_ip_p;
		INP_RUNLOCK(inp);

		/* Give the highest priority to PCB policies. */
		attrs->prio = 1;
		attrs->action = IPSEC_POLICY_IPSEC;
	}
	attrs->dir = spidx->dir;
}

static int mlx5e_if_spd_install(struct ifnet *ifp, void *sp, void *inp1,
    void **ifdatap)
{
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = if_getsoftc(ifp);
	if (priv->gone || !priv->ipsec)
		return (EOPNOTSUPP);

	err = mlx5e_xfrm_validate_policy(priv->mdev, sp, inp1);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return (ENOMEM);

	pol_entry->sp = sp;
	pol_entry->ipsec = priv->ipsec;

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs, inp1);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_pol;
	*ifdatap = pol_entry;

	return 0;

err_pol:
	kfree(pol_entry);
	mlx5_en_err(ifp, "Device failed to offload this policy\n");
	return err;
}

static int mlx5e_if_spd_deinstall(struct ifnet *ifp, void *sp, void *ifdata)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(ifdata);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	kfree(pol_entry);
	return 0;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *pipsec = priv->ipsec;

	if (!pipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(pipsec);
	destroy_workqueue(pipsec->wq);
	mlx5e_ipsec_aso_cleanup(pipsec);
	kfree(pipsec);
	priv->ipsec = NULL;
}

static int
mlx5e_if_ipsec_hwassist(if_t ifnet, void *sav __unused,
    uint32_t drv_spi __unused, void *priv __unused)
{
	return (if_gethwassist(ifnet) & (CSUM_TSO | CSUM_TCP | CSUM_UDP |
	    CSUM_IP | CSUM_IP6_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP));
}

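/*
 * Callbacks registered with the ipsec offload framework through
 * if_setipsec_accel_methods() in mlx5e_ipsec_init() below.
 */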
static const struct if_ipsec_accel_methods mlx5e_ipsec_funcs = {
	.if_sa_newkey = mlx5e_if_sa_newkey,
	.if_sa_deinstall = mlx5e_if_sa_deinstall,
	.if_spdadd = mlx5e_if_spd_install,
	.if_spddel = mlx5e_if_spd_deinstall,
	.if_sa_cnt = mlx5e_if_sa_cnt,
	.if_hwassist = mlx5e_if_ipsec_hwassist,
};

int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *pipsec;
	if_t ifp = priv->ifp;
	int ret;

	mlx5_core_info(mdev, "ipsec "
	    "offload %d log_max_dek %d gen_obj_types %d "
	    "ipsec_encrypt %d ipsec_decrypt %d "
	    "esp_aes_gcm_128_encrypt %d esp_aes_gcm_128_decrypt %d "
	    "ipsec_full_offload %d "
	    "reformat_add_esp_trasport %d reformat_del_esp_trasport %d "
	    "decap %d "
	    "ignore_flow_level_tx %d ignore_flow_level_rx %d "
	    "reformat_natt_tx %d reformat_natt_rx %d "
	    "ipsec_esn %d\n",
	    MLX5_CAP_GEN(mdev, ipsec_offload) != 0,
	    MLX5_CAP_GEN(mdev, log_max_dek) != 0,
	    (MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt) != 0,
	    MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) != 0,
	    MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt) != 0,
	    MLX5_CAP_IPSEC(mdev, ipsec_full_offload) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
	    reformat_add_esp_transport_over_udp) != 0,
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
	    reformat_del_esp_transport_over_udp) != 0,
	    MLX5_CAP_IPSEC(mdev, ipsec_esn) != 0);

	if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		mlx5_core_dbg(mdev, "Not an IPSec offload device\n");
		return 0;
	}

	xa_init_flags(&mdev->ipsec_sadb, XA_FLAGS_ALLOC);

	pipsec = kzalloc(sizeof(*pipsec), GFP_KERNEL);
	if (pipsec == NULL)
		return (ENOMEM);

	pipsec->mdev = mdev;
	pipsec->pdn = priv->pdn;
	pipsec->mkey = priv->mr.key;

	ret = mlx5e_ipsec_aso_init(pipsec);
	if (ret)
		goto err_ipsec_aso;

	pipsec->wq = alloc_workqueue("mlx5e_ipsec", WQ_UNBOUND, 0);
	if (pipsec->wq == NULL) {
		ret = ENOMEM;
		goto err_ipsec_wq;
	}

	ret = mlx5e_accel_ipsec_fs_init(pipsec);
	if (ret)
		goto err_ipsec_alloc;

	if_setipsec_accel_methods(ifp, &mlx5e_ipsec_funcs);
	priv->ipsec = pipsec;
	mlx5_core_dbg(mdev, "IPSec attached to netdevice\n");
	return 0;

err_ipsec_alloc:
	destroy_workqueue(pipsec->wq);
err_ipsec_wq:
	mlx5e_ipsec_aso_cleanup(pipsec);
err_ipsec_aso:
	kfree(pipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return ret;
}