xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c (revision 5ab1c5846ff41be24b1f6beb0317bf8258cd4409)
/*-
 * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_kern_tls.h"

#include "en.h"

#include <dev/mlx5/tls.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

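/*
 * UMA zone constructor; prepares a newly allocated TLS send tag for
 * use by zeroing it and setting up its mutex and deferred work
 * context.
 */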
static int
mlx5e_tls_tag_zinit(void *mem, int size, int flags)
{
	struct mlx5e_tls_tag *ptag = mem;

	MPASS(size == sizeof(*ptag));

	memset(ptag, 0, sizeof(*ptag));
	mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
	INIT_WORK(&ptag->work, mlx5e_tls_work);

	return (0);
}

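/*
 * UMA zone destructor; flushes any pending work, closes the TIS
 * context, if any, and destroys the tag's mutex when the cached zone
 * item is finally reclaimed.
 */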
static void
mlx5e_tls_tag_zfini(void *mem, int size)
{
	struct mlx5e_tls_tag *ptag = mem;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;

	ptls = ptag->tls;
	priv = container_of(ptls, struct mlx5e_priv, tls);

	flush_work(&ptag->work);

	if (ptag->tisn != 0) {
		mlx5_tls_close_tis(priv->mdev, ptag->tisn);
		atomic_add_32(&ptls->num_resources, -1U);
	}

	mtx_destroy(&ptag->mtx);
}

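/*
 * Scrub a TLS send tag and hand it back to the UMA zone. An open TIS
 * context, if any, stays attached to the cached zone item for reuse
 * and is only closed by the zone destructor above.
 */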
static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{

	/* reset some variables */
	ptag->state = MLX5E_TLS_ST_INIT;
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of TIS contexts */
	if (ptag->tisn == 0)
		atomic_add_32(&ptag->tls->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

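/*
 * Initialize the per-device TLS offload state: a single-threaded
 * workqueue for deferred tag setup and teardown, a UMA zone for send
 * tags, the statistics counters and their sysctl nodes.
 */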
int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcreate(ptls->zname, sizeof(struct mlx5e_tls_tag),
	    NULL, NULL, mlx5e_tls_tag_zinit, mlx5e_tls_tag_zfini, UMA_ALIGN_CACHE, 0);

	ptls->max_resources = 1U << MLX5_CAP_GEN(priv->mdev, log_max_dek);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

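/*
 * Tear down the per-device TLS offload state in roughly the reverse
 * order of initialization, after all outstanding work has been
 * flushed.
 */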
void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls) == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

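/*
 * Deferred work handler driving the tag state machine. In the SETUP
 * state it issues the firmware commands which may sleep: opening a
 * TIS context, if needed, and creating a DEK for the session key. In
 * the FREED state it waits for all WQE references to drain, destroys
 * the DEK and recycles the tag.
 */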
static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_SETUP:
		/* try to open TIS, if not present */
		if (ptag->tisn == 0) {
			err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
			    priv->pdn, &ptag->tisn);
			if (err) {
				MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
				break;
			}
		}
		MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_ST_SETUP)
			ptag->state = MLX5E_TLS_ST_TXRDY;
		MLX5E_TLS_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_ST_FREED:
		/* wait for all refs to go away */
		while (ptag->refs != 0)
			msleep(1);

		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

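/*
 * Translate the kernel TLS session parameters into the hardware
 * crypto context: TLS version, record encoding constants, the
 * initialization vector(s) and the cipher key. Depending on the IV
 * length supplied, the IV either fills only the GCM IV field or is
 * split across the implicit and GCM IV fields.
 */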
static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	if (en->iv_len == MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv));
	} else if (en->iv_len == (MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
				  MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv))) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    (char *)en->iv + MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv),
		    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv));
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.implicit_iv),
		    en->iv,
		    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv));
	} else {
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_ST_INIT == 0);

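/*
 * Allocate and set up a TLS send tag: validate the TLS version and
 * cipher against the device capabilities, fill in the crypto
 * parameters, and allocate the underlying rate-limit or unlimited
 * send tag which carries the actual traffic.
 */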
int
mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct if_snd_tag_alloc_rate_limit rl_params;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = ifp->if_softc;

	if (priv->tls.init == 0)
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->state == MLX5E_TLS_ST_INIT);
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS tag */
	ptag->tls = &priv->tls;
	ptag->tag.type = params->hdr.type;

	/* check if there is no TIS context */
	if (ptag->tisn == 0) {
		uint32_t value;

		value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);

		/* check resource limits */
		if (value >= priv->tls.max_resources) {
			error = ENOMEM;
			goto failure;
		}
	}

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	     )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->auth_algorithm != CRYPTO_AES_128_NIST_GMAC) {
				error = EINVAL;
				goto failure;
			}
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->auth_algorithm != CRYPTO_AES_256_NIST_GMAC) {
				error = EINVAL;
				goto failure;
			}
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (ptag->tag.type) {
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		memset(&rl_params, 0, sizeof(rl_params));
		rl_params.hdr = params->tls_rate_limit.hdr;
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.max_rate = params->tls_rate_limit.max_rate;

		error = mlx5e_rl_snd_tag_alloc(ifp,
		    container_of(&rl_params, union if_snd_tag_alloc_params, rate_limit),
		    &ptag->rl_tag);
		if (error)
			goto failure;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		memset(&rl_params, 0, sizeof(rl_params));
		rl_params.hdr = params->tls.hdr;
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;

		error = mlx5e_ul_snd_tag_alloc(ifp,
		    container_of(&rl_params, union if_snd_tag_alloc_params, unlimited),
		    &ptag->rl_tag);
		if (error)
			goto failure;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.m_snd_tag.refcount == 0);
	m_snd_tag_init(&ptag->tag.m_snd_tag, ifp);
	*ppmt = &ptag->tag.m_snd_tag;
	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}

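/*
 * Modify a TLS send tag; only the transmit rate of a rate-limited
 * tag can be changed.
 */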
int
mlx5e_tls_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	struct if_snd_tag_rate_limit_params rl_params;
	int error;
#endif
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);

	switch (ptag->tag.type) {
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		memset(&rl_params, 0, sizeof(rl_params));
		rl_params.max_rate = params->tls_rate_limit.max_rate;
		error = mlx5e_rl_snd_tag_modify(ptag->rl_tag,
		    container_of(&rl_params, union if_snd_tag_modify_params, rate_limit));
		return (error);
#endif
	default:
		return (EOPNOTSUPP);
	}
}

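/*
 * Query a TLS send tag by forwarding the request to the underlying
 * rate-limit or unlimited send tag.
 */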
int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);
	int error;

	switch (ptag->tag.type) {
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		error = mlx5e_rl_snd_tag_query(ptag->rl_tag, params);
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		error = mlx5e_ul_snd_tag_query(ptag->rl_tag, params);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

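/*
 * Free a TLS send tag. The underlying send tag is released
 * immediately, while teardown of the firmware resources is deferred
 * to the work handler via the FREED state.
 */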
void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);
	struct mlx5e_priv *priv;

	switch (ptag->tag.type) {
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		mlx5e_rl_snd_tag_free(ptag->rl_tag);
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		mlx5e_ul_snd_tag_free(ptag->rl_tag);
		break;
	default:
		break;
	}

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_FREED;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = ptag->tag.m_snd_tag.ifp->if_softc;
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);

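/*
 * Post a UMR work request carrying the static TLS parameters, which
 * are copied inline directly behind the WQE. The WQE takes a
 * reference on the tag which is dropped when the WQE completes.
 */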
static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

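/*
 * Post a SET_PSV work request updating the TLS progress parameters,
 * most importantly the TCP sequence number of the next TLS record.
 */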
static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

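/*
 * Post a NOP work request; sent after the parameter WQEs above when
 * there is no TLS record data to (re-)transmit.
 */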
static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = &ptag->refs;
	atomic_add_int(&ptag->refs, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

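/*
 * Build an mbuf chain covering the already-sent part of the current
 * TLS record, so that it can be replayed through the hardware to
 * bring the crypto state up to the retransmitted sequence number.
 * Returns NULL on failure and SBTLS_MBUF_NO_DATA when no data needs
 * to be resent; *ptcp_seq is rewound by the amount of replayed data.
 */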
static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq)
{
	struct mbuf *mr;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_NOMAP | M_EXT)) != (M_NOMAP | M_EXT) ||
	    mb->m_next->m_ext.ext_buf == NULL) {
		mr = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if there is nothing to re-transmit */
	if (offset == 0) {
		mr = SBTLS_MBUF_NO_DATA;
		goto done;
	}

	/* try to get a new mbuf with a packet header */
	mr = m_gethdr(wait, MT_DATA);
	if (mr == NULL)
		goto done;

	mb_dupcl(mr, mb->m_next);

	/* the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	mr->m_pkthdr.len = mr->m_len = offset;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(mr, offset - delta);
		offset = delta;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (mr);
}

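/*
 * Scan the mbuf chain for unmapped TLS data and, if found, return
 * the TLS record sequence number through *pseq. Returns 1 when TLS
 * payload is present and 0 otherwise.
 */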
static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{
	struct mbuf_ext_pgs *ext_pgs;

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_NOMAP))
			continue;
		ext_pgs = (void *)mb->m_ext.ext_buf;
		*pseq = ext_pgs->seqno;
		return (1);
	}
	return (0);
}

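/*
 * Transmit hook for TLS offload. Packets which do not belong to a
 * hardware TLS send tag, or which carry no TLS payload, continue on
 * the regular transmit path. When the TCP sequence number does not
 * match the expected one, the static and progress parameters are
 * reprogrammed and the already-sent part of the TLS record is
 * replayed using DUMP work requests before transmission resumes.
 */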
int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct mlx5e_snd_tag *ptag;
	struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = container_of(mb->m_pkthdr.snd_tag,
	    struct mlx5e_snd_tag, m_snd_tag);

	if (
#if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
	    ptag->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		queue_work(sq->priv->tls.wq, &ptls_tag->work);
		ptls_tag->state = MLX5E_TLS_ST_SETUP;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);

	case MLX5E_TLS_ST_SETUP:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);

	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT, ptls_tag->expected_seq, &tcp_seq);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (r_mb == SBTLS_MBUF_NO_DATA || r_mb->m_data == NULL) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/* setup TLS progress parameters */
			MLX5_SET(sw_tls_cntx, ptls_tag->crypto_params,
			    progress.next_record_tcp_sn, tcp_seq);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->pref = &ptls_tag->refs;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->pref = &ptls_tag->refs;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif		/* KERN_TLS */