xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c (revision 67f93079075be4a2f5b7198fb3bda6d10bbd54e7)
/*-
 * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>
#include <dev/mlx5/crypto.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

#ifdef RATELIMIT
static if_snd_tag_modify_t mlx5e_tls_rl_snd_tag_modify;
#endif
static if_snd_tag_query_t mlx5e_tls_snd_tag_query;
static if_snd_tag_free_t mlx5e_tls_snd_tag_free;

static const struct if_snd_tag_sw mlx5e_tls_snd_tag_sw = {
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw mlx5e_tls_rl_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rl_snd_tag_modify,
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

/*
 * Expand the tls tag UMA zone in a sleepable context
 */

static void
mlx5e_prealloc_tags(struct mlx5e_priv *priv, int nitems)
{
	struct mlx5e_tls_tag **tags;
	int i;

	tags = malloc(sizeof(tags[0]) * nitems,
	    M_MLX5E_TLS, M_WAITOK);
	for (i = 0; i < nitems; i++)
		tags[i] = uma_zalloc(priv->tls.zone, M_WAITOK);
	__compiler_membar();
	for (i = 0; i < nitems; i++)
		uma_zfree(priv->tls.zone, tags[i]);
	free(tags, M_MLX5E_TLS);
}

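/*
 * UMA cache-zone import callback; allocates "cnt" backing tags, each
 * with its own mutex, deferred-work context and TIS number.
 */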
static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv = arg;
	int err, i;

	/*
	 * mlx5_tls_open_tis() sleeps on a firmware command, so
	 * zone allocations must be done from a sleepable context.
	 * Note that the uma_zalloc() in mlx5e_tls_snd_tag_alloc()
	 * is done with M_NOWAIT so that hitting the zone limit does
	 * not cause the allocation to pause forever.
	 */

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		if (ptag == NULL)
			return (i);
		ptag->tls = &priv->tls;
		mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_work);
		err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
		    priv->pdn, &ptag->tisn);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			/* tear down the mutex before freeing the tag */
			mtx_destroy(&ptag->mtx);
			free(ptag, M_MLX5E_TLS);
			return (i);
		}

		store[i] = ptag;
	}
	return (i);
}

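/*
 * UMA cache-zone release callback; undoes what the import callback
 * created: pending work is flushed, the TIS context is closed and the
 * tag memory is freed.
 */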
static void
mlx5e_tls_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];
		ptls = ptag->tls;
		priv = container_of(ptls, struct mlx5e_priv, tls);

		flush_work(&ptag->work);

		if (ptag->tisn != 0) {
			mlx5_tls_close_tis(priv->mdev, ptag->tisn);
		}

		mtx_destroy(&ptag->mtx);

		free(ptag, M_MLX5E_TLS);
	}
}

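/*
 * Return a tag to the UMA zone after scrubbing the key material and
 * resetting its state for reuse.
 */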
static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_ST_FREED;

	/* reset some variables */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

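/*
 * Sysctl handler for the "tls_max_tag" node; updates the UMA zone
 * limit, rejecting values of zero or above the hardware maximum.
 */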
static int
mlx5e_max_tag_proc(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = (struct mlx5e_priv *)arg1;
	struct mlx5e_tls *ptls = &priv->tls;
	int err;
	unsigned int max_tags;

	max_tags = ptls->zone_max;
	err = sysctl_handle_int(oidp, &max_tags, arg2, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (max_tags == ptls->zone_max)
		return (0);
	if (max_tags > priv->tls.max_resources || max_tags == 0)
		return (EINVAL);
	ptls->zone_max = max_tags;
	uma_zone_set_max(ptls->zone, ptls->zone_max);
	return (0);
}

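/*
 * Set up the TX TLS offload state for a port: work queue, tag zone,
 * resource limits, statistics counters and sysctl nodes. Behaviour
 * can be tuned at boot time, e.g. in /boot/loader.conf:
 *
 *	hw.mlx5.tls_max_tags="1024"	# cap the tag zone
 *	hw.mlx5.tls_prealloc_tags="1"	# preallocate the whole zone
 */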
int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t max_dek, max_tis, x;
	int zone_max = 0, prealloc_tags = 0;

	if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	TUNABLE_INT_FETCH("hw.mlx5.tls_max_tags", &zone_max);
	TUNABLE_INT_FETCH("hw.mlx5.tls_prealloc_tags", &prealloc_tags);

	ptls->zone = uma_zcache_create(ptls->zname,
	     sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
	     mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv,
	     UMA_ZONE_UNMANAGED | (prealloc_tags ? UMA_ZONE_NOFREE : 0));

	/* shared between RX and TX TLS */
	max_dek = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);
	max_tis = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_tis) - 1);
	ptls->max_resources = MIN(max_dek, max_tis);

	if (zone_max != 0) {
		ptls->zone_max = zone_max;
		if (ptls->zone_max > priv->tls.max_resources)
			ptls->zone_max = priv->tls.max_resources;
	} else {
		ptls->zone_max = priv->tls.max_resources;
	}

	uma_zone_set_max(ptls->zone, ptls->zone_max);
	if (prealloc_tags != 0)
		mlx5e_prealloc_tags(priv, ptls->zone_max);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tls_max_tag",
	    CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, priv, 0, mlx5e_max_tag_proc,
	    "IU", "Max number of TLS offload session tags");

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

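/*
 * Open a TIS context, if needed, and create the DEK, then move the
 * tag from the INIT to the SETUP state. Called both directly from the
 * tag allocation path and deferred via the work queue.
 */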
static int
mlx5e_tls_st_init(struct mlx5e_priv *priv, struct mlx5e_tls_tag *ptag)
{
	int err;

	/* open a TIS context, if not already present */
	if (ptag->tisn == 0) {
		err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
		    priv->pdn, &ptag->tisn);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			return (-err);
		}
	}
	MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

	/* try to allocate a DEK context ID */
	err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
	    MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
	    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
	    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
	    &ptag->dek_index);
	if (err) {
		MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
		return (-err);
	}

	MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

	ptag->dek_index_ok = 1;

	MLX5E_TLS_TAG_LOCK(ptag);
	if (ptag->state == MLX5E_TLS_ST_INIT)
		ptag->state = MLX5E_TLS_ST_SETUP;
	MLX5E_TLS_TAG_UNLOCK(ptag);
	return (0);
}

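/*
 * Deferred work handler; completes tag setup for INIT tags, and
 * destroys the DEK and frees the tag for RELEASE tags.
 */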
static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_INIT:
		(void)mlx5e_tls_st_init(priv, ptag);
		break;

	case MLX5E_TLS_ST_RELEASE:
		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			(void)mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

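/*
 * Translate the kernel TLS session parameters into the firmware's
 * static parameter layout: TLS version, IV and key material.
 */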
static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initialization vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_ST_INIT == 0);

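/*
 * Allocate a TLS send tag: validate the session parameters against
 * the hardware capabilities, program the crypto context, and stack
 * the tag on top of an unlimited or rate-limited send tag.
 */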
int
mlx5e_tls_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	union if_snd_tag_alloc_params rl_params;
	const struct if_snd_tag_sw *snd_tag_sw;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = if_getsoftc(ifp);

	if (priv->gone != 0 || priv->tls.init == 0)
		return (EOPNOTSUPP);

	ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* the zone import must have provided a valid TIS context */
	KASSERT(ptag->tisn != 0, ("ptag %p w/0 tisn", ptag));

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	     )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.hdr = params->hdr;
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
		snd_tag_sw = &mlx5e_tls_rl_snd_tag_sw;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;
		snd_tag_sw = &mlx5e_tls_snd_tag_sw;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	error = m_snd_tag_alloc(ifp, &rl_params, &ptag->rl_tag);
	if (error)
		goto failure;

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_ST_INIT;

	error = mlx5e_tls_st_init(priv, ptag);
	if (error != 0)
		goto failure;

	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}


#ifdef RATELIMIT
static int
mlx5e_tls_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	union if_snd_tag_modify_params rl_params;
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	int error;

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
	error = ptag->rl_tag->sw->snd_tag_modify(ptag->rl_tag, &rl_params);
	return (error);
}
#endif

static int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);

	return (ptag->rl_tag->sw->snd_tag_query(ptag->rl_tag, params));
}

static void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	struct mlx5e_priv *priv;

	m_snd_tag_rele(ptag->rl_tag);

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_RELEASE;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = if_getsoftc(ptag->tag.ifp);
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);

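/*
 * Post a UMR work request carrying the static crypto parameters
 * (key, IV, record number) inline for the tag's TIS.
 */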
static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

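/*
 * Post a SET_PSV work request carrying the progress parameters
 * (record tracking state) for the tag's TIS.
 */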
static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

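/*
 * Post a fenced NOP work request; used when the static and progress
 * parameters have been programmed but there is no payload to resend.
 */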
static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

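/*
 * Rebuild the beginning of the current TLS record from the unmapped
 * (M_EXTPG) mbuf so that the hardware can be resynchronized after an
 * out-of-order transmit. Returns SBTLS_MBUF_NO_DATA when the record
 * starts exactly at the given mbuf and NULL on failure, and rewinds
 * *ptcp_seq by the amount of data that must be dumped to hardware.
 */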
static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq, bool *pis_start)
{
	struct mbuf *mr, *top;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_EXTPG | M_EXT)) != (M_EXTPG | M_EXT)) {
		top = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if we don't need to re-transmit anything */
	if (offset == 0) {
		top = SBTLS_MBUF_NO_DATA;
		*pis_start = true;
		goto done;
	}

	/* try to get a new packet header */
	top = m_gethdr(wait, MT_DATA);
	if (top == NULL)
		goto done;

	mr = m_get(wait, MT_DATA);
	if (mr == NULL) {
		m_free(top);
		top = NULL;
		goto done;
	}

	top->m_next = mr;

	mb_dupcl(mr, mb->m_next);

	/* start at the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	top->m_pkthdr.len = mr->m_len = offset;
	top->m_len = 0;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(top, offset - delta);
		offset = delta;

		/* continue where we left off */
		*pis_start = false;
	} else {
		*pis_start = true;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (top);
}

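/*
 * Scan the mbuf chain for unmapped (M_EXTPG) data and return the TLS
 * record sequence number of the first match, if any.
 */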
static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_EXTPG))
			continue;
		*pseq = mb->m_epg_seqno;
		return (1);
	}
	return (0);
}

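/*
 * TLS transmit entry point for the send queue. Returns
 * MLX5E_TLS_CONTINUE to let the regular transmit path proceed (with
 * the TIS set up for TLS packets), MLX5E_TLS_LOOP after consuming the
 * mbuf internally, or MLX5E_TLS_FAILURE on error.
 */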
int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct m_snd_tag *ptag;
	const struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = mb->m_pkthdr.snd_tag;

	if (
#ifdef RATELIMIT
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);
	case MLX5E_TLS_ST_SETUP:
		ptls_tag->state = MLX5E_TLS_ST_TXRDY;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
		/* FALLTHROUGH */
	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		bool is_start;
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT, ptls_tag->expected_seq, &tcp_seq, &is_start);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (is_start) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->mst = &ptls_tag->tag;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->mst = &ptls_tag->tag;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif		/* KERN_TLS */