/*-
 * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

#ifdef RATELIMIT
static if_snd_tag_modify_t mlx5e_tls_rl_snd_tag_modify;
#endif
static if_snd_tag_query_t mlx5e_tls_snd_tag_query;
static if_snd_tag_free_t mlx5e_tls_snd_tag_free;

static const struct if_snd_tag_sw mlx5e_tls_snd_tag_sw = {
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw mlx5e_tls_rl_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rl_snd_tag_modify,
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_work);
		store[i] = ptag;
	}
	return (i);
}
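/*
 * mlx5e_tls_tag_release() below is the counterpart of
 * mlx5e_tls_tag_import() above: together they form the import/release
 * backend of the UMA cache zone created in mlx5e_tls_init(). Import
 * preallocates zeroed send tags from the NUMA domain nearest to the
 * device when the zone runs dry, while release waits for any pending
 * work to finish, closes a TIS context that is still attached and
 * gives the memory back to the kernel.
 */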
static void
mlx5e_tls_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];
		ptls = ptag->tls;
		priv = container_of(ptls, struct mlx5e_priv, tls);

		flush_work(&ptag->work);

		if (ptag->tisn != 0) {
			mlx5_tls_close_tis(priv->mdev, ptag->tisn);
			atomic_add_32(&ptls->num_resources, -1U);
		}

		mtx_destroy(&ptag->mtx);

		free(ptag, M_MLX5E_TLS);
	}
}

static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{

	/* reset some variables */
	ptag->state = MLX5E_TLS_ST_INIT;
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of TIS contexts */
	if (ptag->tisn == 0)
		atomic_add_32(&ptag->tls->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcache_create(ptls->zname,
	    sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
	    mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev, 0);

	/* shared between RX and TX TLS */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}
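/*
 * Deferred part of the send tag state machine, run on the
 * "mlx5-tls-wq" workqueue: for a tag in the INIT state it opens a TIS
 * context (if none is attached yet) and creates a DEK for the session
 * key, advancing the tag to the SETUP state on success. For a tag in
 * the FREED state it waits until the transmit path has dropped all
 * references, destroys the DEK again and recycles the tag.
 */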
static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_INIT:
		/* try to open TIS, if not present */
		if (ptag->tisn == 0) {
			err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
			    priv->pdn, &ptag->tisn);
			if (err) {
				MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
				break;
			}
		}
		MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_ST_INIT)
			ptag->state = MLX5E_TLS_ST_SETUP;
		MLX5E_TLS_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_ST_FREED:
		/* wait for all refs to go away */
		while (ptag->refs != 0)
			msleep(1);

		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_ST_INIT == 0);
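/*
 * Allocate a hardware TLS send tag for an established kernel TLS
 * session. The routine checks the TLS version and cipher against the
 * device capabilities, charges one of the DEK-limited TLS resources,
 * copies the crypto material into the tag and wraps an inner
 * unlimited or rate-limit send tag for the actual packet scheduling.
 * The firmware programming (TIS + DEK) is delegated to
 * mlx5e_tls_work() and flushed before returning.
 *
 * Roughly, the network stack reaches this function via a send tag
 * allocation like the following (a sketch, not the verbatim ktls
 * code path; "inp" and "tls_session" are placeholders):
 *
 *	params.hdr.type = IF_SND_TAG_TYPE_TLS;
 *	params.tls.inp = inp;
 *	params.tls.tls = tls_session;
 *	error = m_snd_tag_alloc(ifp, &params, &mst);
 */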
int
mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	union if_snd_tag_alloc_params rl_params;
	const struct if_snd_tag_sw *snd_tag_sw;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = ifp->if_softc;

	if (priv->gone != 0 || priv->tls.init == 0)
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->state == MLX5E_TLS_ST_INIT);
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS tag */
	ptag->tls = &priv->tls;

	/* check if there is no TIS context */
	if (ptag->tisn == 0) {
		uint32_t value;

		value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);

		/* check resource limits */
		if (value >= priv->tls.max_resources) {
			error = ENOMEM;
			goto failure;
		}
	}

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	    )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.hdr = params->hdr;
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
		snd_tag_sw = &mlx5e_tls_rl_snd_tag_sw;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;
		snd_tag_sw = &mlx5e_tls_snd_tag_sw;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	error = m_snd_tag_alloc(ifp, &rl_params, &ptag->rl_tag);
	if (error)
		goto failure;

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, snd_tag_sw);
	*ppmt = &ptag->tag;

	queue_work(priv->tls.wq, &ptag->work);
	flush_work(&ptag->work);

	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}

#ifdef RATELIMIT
static int
mlx5e_tls_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	union if_snd_tag_modify_params rl_params;
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	int error;

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
	error = ptag->rl_tag->sw->snd_tag_modify(ptag->rl_tag, &rl_params);
	return (error);
}
#endif

static int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);

	return (ptag->rl_tag->sw->snd_tag_query(ptag->rl_tag, params));
}

static void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	struct mlx5e_priv *priv;

	m_snd_tag_rele(ptag->rl_tag);

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_FREED;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = ptag->tag.ifp->if_softc;
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);
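/*
 * Post a UMR work queue entry that carries the static TLS parameters
 * (DEK index, IVs, initial record number) inline to the tag's TIS
 * context. The entry holds a reference on the tag until it completes;
 * see the p_refcount handling at the end of the function.
 */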
static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}
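/*
 * When a packet does not start at the TCP sequence number the
 * hardware expects, reconstruct the part of the current TLS record
 * that precedes it so the record can be replayed from its beginning.
 * The helper returns a new mbuf chain referencing the already-sent
 * record bytes, SBTLS_MBUF_NO_DATA when the packet already sits at a
 * record boundary, or NULL when recovery is not possible. *pis_start
 * tells the caller whether (re)transmission starts at a record
 * boundary, and *ptcp_seq is rewound by the amount of data to be
 * retransmitted.
 */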
#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq, bool *pis_start)
{
	struct mbuf *mr, *top;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_EXTPG | M_EXT)) != (M_EXTPG | M_EXT)) {
		top = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if we don't need to re-transmit anything */
	if (offset == 0) {
		top = SBTLS_MBUF_NO_DATA;
		*pis_start = true;
		goto done;
	}

	/* try to get a new packet header */
	top = m_gethdr(wait, MT_DATA);
	if (top == NULL)
		goto done;

	mr = m_get(wait, MT_DATA);
	if (mr == NULL) {
		m_free(top);
		top = NULL;
		goto done;
	}

	top->m_next = mr;

	mb_dupcl(mr, mb->m_next);

	/* the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	top->m_pkthdr.len = mr->m_len = offset;
	top->m_len = 0;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(top, offset - delta);
		offset = delta;

		/* continue where we left off */
		*pis_start = false;
	} else {
		*pis_start = true;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (top);
}

static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_EXTPG))
			continue;
		*pseq = mb->m_epg_seqno;
		return (1);
	}
	return (0);
}
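/*
 * TLS transmit hook invoked from the regular mlx5e transmit path. It
 * returns MLX5E_TLS_CONTINUE for packets that can be sent as-is (or
 * with the TIS set up for inline hardware encryption), MLX5E_TLS_LOOP
 * when the caller must re-run its transmit routine because resync
 * and/or DUMP work queue entries were queued, and MLX5E_TLS_FAILURE
 * on error. A caller is expected to dispatch on the result roughly
 * like this (a sketch, not the verbatim mlx5_en_tx.c code):
 *
 *	switch (mlx5e_sq_tls_xmit(sq, &args, &mb)) {
 *	case MLX5E_TLS_LOOP:
 *		goto top;
 *	case MLX5E_TLS_FAILURE:
 *		m_freem(mb);
 *		return (ENOMEM);
 *	default:
 *		break;
 *	}
 */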
int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct m_snd_tag *ptag;
	const struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = mb->m_pkthdr.snd_tag;

	if (
#ifdef RATELIMIT
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);
	case MLX5E_TLS_ST_SETUP:
		ptls_tag->state = MLX5E_TLS_ST_TXRDY;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		bool is_start;
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT,
		    ptls_tag->expected_seq, &tcp_seq, &is_start);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (is_start) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->pref = &ptls_tag->refs;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->pref = &ptls_tag->refs;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif /* KERN_TLS */