/*-
 * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>
#include <dev/mlx5/crypto.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

#ifdef RATELIMIT
static if_snd_tag_modify_t mlx5e_tls_rl_snd_tag_modify;
#endif
static if_snd_tag_query_t mlx5e_tls_snd_tag_query;
static if_snd_tag_free_t mlx5e_tls_snd_tag_free;

static const struct if_snd_tag_sw mlx5e_tls_snd_tag_sw = {
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw mlx5e_tls_rl_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rl_snd_tag_modify,
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_work);
		store[i] = ptag;
	}
	return (i);
}
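
/*
 * UMA cache release callback; undoes mlx5e_tls_tag_import(). Pending
 * work is flushed before the TIS context, the mutex and the memory
 * backing each tag are released.
 */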
static void
mlx5e_tls_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];
		ptls = ptag->tls;
		priv = container_of(ptls, struct mlx5e_priv, tls);

		flush_work(&ptag->work);

		if (ptag->tisn != 0) {
			mlx5_tls_close_tis(priv->mdev, ptag->tisn);
			atomic_add_32(&ptls->num_resources, -1U);
		}

		mtx_destroy(&ptag->mtx);

		free(ptag, M_MLX5E_TLS);
	}
}

static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_ST_FREED;

	/* reset some variables */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of TIS contexts */
	if (ptag->tisn == 0)
		atomic_add_32(&ptag->tls->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcache_create(ptls->zname,
	    sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
	    mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev,
	    UMA_ZONE_UNMANAGED);

	/* shared between RX and TX TLS */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}
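
/*
 * Setup the hardware state for a TLS send tag: open a TIS context,
 * if not already done, and create a DEK object from the key material
 * stored in "crypto_params". On success the tag moves from the INIT
 * to the SETUP state.
 */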
static int
mlx5e_tls_st_init(struct mlx5e_priv *priv, struct mlx5e_tls_tag *ptag)
{
	int err;

	/* try to open TIS, if not present */
	if (ptag->tisn == 0) {
		err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
		    priv->pdn, &ptag->tisn);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			return (err);
		}
	}
	MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

	/* try to allocate a DEK context ID */
	err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
	    MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
	    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
	    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
	    &ptag->dek_index);
	if (err) {
		MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
		return (err);
	}

	MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

	ptag->dek_index_ok = 1;

	MLX5E_TLS_TAG_LOCK(ptag);
	if (ptag->state == MLX5E_TLS_ST_INIT)
		ptag->state = MLX5E_TLS_ST_SETUP;
	MLX5E_TLS_TAG_UNLOCK(ptag);
	return (0);
}

static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_INIT:
		(void)mlx5e_tls_st_init(priv, ptag);
		break;

	case MLX5E_TLS_ST_RELEASE:
		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			(void)mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_ST_INIT == 0);
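
/*
 * Allocate and configure a TLS send tag. The TLS version and cipher
 * are checked against the hardware capabilities, the underlying
 * unlimited or rate-limited send tag is allocated, and then the
 * hardware state is initialized, deferring to the work queue when
 * the immediate attempt fails.
 */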
int
mlx5e_tls_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	union if_snd_tag_alloc_params rl_params;
	const struct if_snd_tag_sw *snd_tag_sw;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = if_getsoftc(ifp);

	if (priv->gone != 0 || priv->tls.init == 0)
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls.zone, M_WAITOK);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS tag */
	ptag->tls = &priv->tls;

	/* check if there is no TIS context */
	if (ptag->tisn == 0) {
		uint32_t value;

		value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);

		/* check resource limits */
		if (value >= priv->tls.max_resources) {
			error = ENOMEM;
			goto failure;
		}
	}

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	    && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	    )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.hdr = params->hdr;
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
		snd_tag_sw = &mlx5e_tls_rl_snd_tag_sw;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;
		snd_tag_sw = &mlx5e_tls_snd_tag_sw;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	error = m_snd_tag_alloc(ifp, &rl_params, &ptag->rl_tag);
	if (error)
		goto failure;

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_ST_INIT;

	/*
	 * Try to immediately init the tag. We may fail if the NIC's
	 * resources are tied up with send tags that are in the work
	 * queue, waiting to be freed. So if we fail, put ourselves
	 * on the queue so as to try again after resources have been freed.
	 */
	error = mlx5e_tls_st_init(priv, ptag);
	if (error != 0) {
		queue_work(priv->tls.wq, &ptag->work);
		flush_work(&ptag->work);
	}

	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}
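
/*
 * The modify and query operations below forward the request to the
 * underlying send tag, while free defers the destruction of the DEK
 * object to the work queue:
 */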
#ifdef RATELIMIT
static int
mlx5e_tls_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	union if_snd_tag_modify_params rl_params;
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	int error;

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
	error = ptag->rl_tag->sw->snd_tag_modify(ptag->rl_tag, &rl_params);
	return (error);
}
#endif

static int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);

	return (ptag->rl_tag->sw->snd_tag_query(ptag->rl_tag, params));
}

static void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	struct mlx5e_priv *priv;

	m_snd_tag_rele(ptag->rl_tag);

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_RELEASE;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = if_getsoftc(ptag->tag.ifp);
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);

static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}
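
/*
 * Queue a NOP WQE. This is used by mlx5e_sq_tls_xmit() when a TLS
 * record begins exactly at the current send position and no payload
 * needs to be resent after the parameter WQEs above.
 */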
static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq, bool *pis_start)
{
	struct mbuf *mr, *top;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_EXTPG | M_EXT)) != (M_EXTPG | M_EXT)) {
		top = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if we don't need to re-transmit anything */
	if (offset == 0) {
		top = SBTLS_MBUF_NO_DATA;
		*pis_start = true;
		goto done;
	}

	/* try to get a new packet header */
	top = m_gethdr(wait, MT_DATA);
	if (top == NULL)
		goto done;

	mr = m_get(wait, MT_DATA);
	if (mr == NULL) {
		m_free(top);
		top = NULL;
		goto done;
	}

	top->m_next = mr;

	mb_dupcl(mr, mb->m_next);

	/* the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	top->m_pkthdr.len = mr->m_len = offset;
	top->m_len = 0;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(top, offset - delta);
		offset = delta;

		/* continue where we left off */
		*pis_start = false;
	} else {
		*pis_start = true;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (top);
}
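
/*
 * Scan the mbuf chain for unmapped, M_EXTPG, data and return the TLS
 * record sequence number of the first match, if any:
 */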
static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_EXTPG))
			continue;
		*pseq = mb->m_epg_seqno;
		return (1);
	}
	return (0);
}

int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct m_snd_tag *ptag;
	const struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = mb->m_pkthdr.snd_tag;

	if (
#ifdef RATELIMIT
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);
	case MLX5E_TLS_ST_SETUP:
		ptls_tag->state = MLX5E_TLS_ST_TXRDY;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		bool is_start;
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT, ptls_tag->expected_seq, &tcp_seq, &is_start);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (is_start) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->mst = &ptls_tag->tag;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->mst = &ptls_tag->tag;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif /* KERN_TLS */