/*-
 * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

#ifdef RATELIMIT
static if_snd_tag_modify_t mlx5e_tls_rl_snd_tag_modify;
#endif
static if_snd_tag_query_t mlx5e_tls_snd_tag_query;
static if_snd_tag_free_t mlx5e_tls_snd_tag_free;

static const struct if_snd_tag_sw mlx5e_tls_snd_tag_sw = {
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw mlx5e_tls_rl_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rl_snd_tag_modify,
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

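/*
 * The UMA cache zone import and release callbacks below allocate and
 * free batches of TLS send tags. Import allocates the tags from the
 * NUMA domain of the mlx5 device, so that the per-connection crypto
 * state stays local to the hardware using it.
 */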
static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_work);
		store[i] = ptag;
	}
	return (i);
}

static void
mlx5e_tls_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];
		ptls = ptag->tls;
		priv = container_of(ptls, struct mlx5e_priv, tls);

		flush_work(&ptag->work);

		if (ptag->tisn != 0) {
			mlx5_tls_close_tis(priv->mdev, ptag->tisn);
			atomic_add_32(&ptls->num_resources, -1U);
		}

		mtx_destroy(&ptag->mtx);

		free(ptag, M_MLX5E_TLS);
	}
}

static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_ST_FREED;

	/* reset some variables */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of TIS contexts */
	if (ptag->tisn == 0)
		atomic_add_32(&ptag->tls->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcache_create(ptls->zname,
	    sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
	    mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev,
	    UMA_ZONE_UNMANAGED);

	/* shared between RX and TX TLS */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

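/*
 * All firmware interaction needed to arm or tear down a tag runs from
 * this work function: opening a TIS on first use, creating the DEK
 * (data encryption key) context, and destroying it on release. These
 * are sleeping firmware commands, so they are kept off the transmit
 * path and serialized on the driver's single-threaded workqueue.
 */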
static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_INIT:
		/* try to open TIS, if not present */
		if (ptag->tisn == 0) {
			err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
			    priv->pdn, &ptag->tisn);
			if (err) {
				MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
				break;
			}
		}
		MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_ST_INIT)
			ptag->state = MLX5E_TLS_ST_SETUP;
		MLX5E_TLS_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_ST_RELEASE:
		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_ST_INIT == 0);

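/*
 * Allocate a TLS send tag for a connection. The tag wraps an inner
 * unlimited or rate-limit send tag, validates the TLS version and
 * cipher against device capabilities, and then kicks the workqueue
 * to set up the TIS and DEK contexts in firmware.
 */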
int
mlx5e_tls_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	union if_snd_tag_alloc_params rl_params;
	const struct if_snd_tag_sw *snd_tag_sw;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = if_getsoftc(ifp);

	if (priv->gone != 0 || priv->tls.init == 0)
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS tag */
	ptag->tls = &priv->tls;

	/* check if there is no TIS context */
	if (ptag->tisn == 0) {
		uint32_t value;

		value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);

		/* check resource limits */
		if (value >= priv->tls.max_resources) {
			error = ENOMEM;
			goto failure;
		}
	}

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	    )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.hdr = params->hdr;
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
		snd_tag_sw = &mlx5e_tls_rl_snd_tag_sw;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;
		snd_tag_sw = &mlx5e_tls_snd_tag_sw;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	error = m_snd_tag_alloc(ifp, &rl_params, &ptag->rl_tag);
	if (error)
		goto failure;

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_ST_INIT;

	queue_work(priv->tls.wq, &ptag->work);
	flush_work(&ptag->work);

	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}

#ifdef RATELIMIT
static int
mlx5e_tls_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	union if_snd_tag_modify_params rl_params;
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	int error;

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
	error = ptag->rl_tag->sw->snd_tag_modify(ptag->rl_tag, &rl_params);
	return (error);
}
#endif

static int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);

	return (ptag->rl_tag->sw->snd_tag_query(ptag->rl_tag, params));
}

static void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	struct mlx5e_priv *priv;

	m_snd_tag_rele(ptag->rl_tag);

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_RELEASE;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = if_getsoftc(ptag->tag.ifp);
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);

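/*
 * Post a UMR work queue entry that carries the static crypto
 * parameters (DEK index, IV, initial record number) inline to the
 * hardware TIS identified by ptag->tisn.
 */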
static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

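/*
 * When the transmit path sees a TCP sequence number other than the
 * one it expects (typically a retransmission), the hardware has lost
 * cipher state for the current TLS record. sbtls_recover_record()
 * builds an mbuf chain holding the part of the record that precedes
 * the current packet, so it can be re-dumped to the hardware, and
 * rewinds the TCP sequence number accordingly. SBTLS_MBUF_NO_DATA
 * means the packet starts exactly at a record boundary and nothing
 * needs to be replayed.
 */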
#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq, bool *pis_start)
{
	struct mbuf *mr, *top;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_EXTPG | M_EXT)) != (M_EXTPG | M_EXT)) {
		top = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if we don't need to re-transmit anything */
	if (offset == 0) {
		top = SBTLS_MBUF_NO_DATA;
		*pis_start = true;
		goto done;
	}

	/* try to get a new packet header */
	top = m_gethdr(wait, MT_DATA);
	if (top == NULL)
		goto done;

	mr = m_get(wait, MT_DATA);
	if (mr == NULL) {
		m_free(top);
		top = NULL;
		goto done;
	}

	top->m_next = mr;

	mb_dupcl(mr, mb->m_next);

	/* the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	top->m_pkthdr.len = mr->m_len = offset;
	top->m_len = 0;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(top, offset - delta);
		offset = delta;

		/* continue where we left off */
		*pis_start = false;
	} else {
		*pis_start = true;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (top);
}

static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_EXTPG))
			continue;
		*pseq = mb->m_epg_seqno;
		return (1);
	}
	return (0);
}

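/*
 * Transmit-path hook. Returns MLX5E_TLS_CONTINUE to let the caller
 * send the mbuf, with the TIS number and header size filled into
 * parg; MLX5E_TLS_LOOP after posting recovery WQEs, so the caller
 * retries the same mbuf; or MLX5E_TLS_FAILURE to drop the packet.
 */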
int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct m_snd_tag *ptag;
	const struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = mb->m_pkthdr.snd_tag;

	if (
#ifdef RATELIMIT
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);
	case MLX5E_TLS_ST_SETUP:
		ptls_tag->state = MLX5E_TLS_ST_TXRDY;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
		/* FALLTHROUGH */
	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		bool is_start;
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT, ptls_tag->expected_seq, &tcp_seq, &is_start);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (is_start) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->mst = &ptls_tag->tag;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->mst = &ptls_tag->tag;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif		/* KERN_TLS */