/* xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c (revision d6d66936c45e99ec284554292181d26b6a5558d3) */
/*-
 * Copyright (c) 2021-2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/crypto.h>
#include <dev/mlx5/tls.h>

#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_core/fs_tcp.h>

#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

static if_snd_tag_free_t mlx5e_tls_rx_snd_tag_free;
static if_snd_tag_modify_t mlx5e_tls_rx_snd_tag_modify;
static if_snd_tag_status_str_t mlx5e_tls_rx_snd_tag_status_str;

static const struct if_snd_tag_sw mlx5e_tls_rx_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rx_snd_tag_modify,
	.snd_tag_free = mlx5e_tls_rx_snd_tag_free,
	.snd_tag_status_str = mlx5e_tls_rx_snd_tag_status_str,
	.type = IF_SND_TAG_TYPE_TLS_RX
};

static const char *mlx5e_tls_rx_progress_params_auth_state_str[] = {
	[MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD] = "no_offload",
	[MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD] = "offload",
	[MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION] =
	    "authentication",
};

static const char *mlx5e_tls_rx_progress_params_record_tracker_state_str[] = {
	[MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START] = "start",
	[MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING] =
	    "tracking",
	[MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING] =
	    "searching",
};

MALLOC_DEFINE(M_MLX5E_TLS_RX, "MLX5E_TLS_RX", "MLX5 ethernet HW TLS RX");

/* software TLS RX context */
struct mlx5_ifc_sw_tls_rx_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_rx_cntx) <= sizeof(((struct mlx5e_tls_rx_tag *)NULL)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)NULL)->mkc));

static const char *mlx5e_tls_rx_stats_desc[] = {
	MLX5E_TLS_RX_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_rx_work(struct work_struct *);
static bool mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(struct mlx5e_tls_rx_tag *,
    uint32_t, uint32_t *, uint64_t *);

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param) % 16) == 0);

static uint32_t
mlx5e_tls_rx_get_ch(struct mlx5e_priv *priv, uint32_t flowid, uint32_t flowtype)
{
	u32 ch;
#ifdef RSS
	u32 temp;
#endif

	/* keep this code synced with mlx5e_select_queue() */
	ch = priv->params.num_channels;
#ifdef RSS
	if (rss_hash2bucket(flowid, flowtype, &temp) == 0)
		ch = temp % ch;
	else
#endif
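		/* spread flows using the low seven bits of the flow ID */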
		ch = (flowid % 128) % ch;
	return (ch);
}

/*
 * This function gets a pointer to an internal queue, IQ, based on the
 * provided "flowid" and "flowtype". The IQ returned may in some rare
 * cases not be activated or running, but this is all handled by the
 * "mlx5e_iq_get_producer_index()" function.
 *
 * The idea behind this function is to spread the IQ traffic as much
 * as possible and to avoid congestion on the same IQ when processing
 * RX traffic.
 */
static struct mlx5e_iq *
mlx5e_tls_rx_get_iq(struct mlx5e_priv *priv, uint32_t flowid, uint32_t flowtype)
{
	/*
	 * NOTE: The channels array is only freed at detach
	 * and it is safe to return a pointer inside the
	 * channels structure as long as we reference the
	 * priv.
	 */
	return (&priv->channel[mlx5e_tls_rx_get_ch(priv, flowid, flowtype)].iq);
}

static void
mlx5e_tls_rx_send_static_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;

	ptag = (struct mlx5e_tls_rx_tag *)arg;

	m_snd_tag_rele(&ptag->tag);
}
/*
 * This function sends the so-called TLS RX static parameters to the
 * hardware. These parameters are temporarily stored in the
 * "crypto_params" field of the TLS RX tag.  Most importantly this
 * function sets the TCP sequence number (32-bit) and TLS record
 * number (64-bit) where the decryption can resume.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_send_static_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag *ptag)
{
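	/* WQE size in 16-byte data segments, including the inlined params */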
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}
	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

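	/*
	 * Fill out the control segment: WQE index and UMR opcode,
	 * using the TLS static params opmod; "imm" carries the TIRN:
	 */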
	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIR_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tirn << 8);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords =
	    cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param));

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = &mlx5e_tls_rx_send_static_parameters_cb;
	iq->data[pi].arg = ptag;

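	/* hold a tag reference; dropped by the completion callback */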
	m_snd_tag_ref(&ptag->tag);

	iq->pc += iq->data[pi].num_wqebbs;

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

	return (0);	/* success */
}

static void
mlx5e_tls_rx_send_progress_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;

	ptag = (struct mlx5e_tls_rx_tag *)arg;

	complete(&ptag->progress_complete);
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)NULL)->psv));

/*
 * This function resets the state of the TIR context to start
 * searching for a valid TLS header and is used only when allocating
 * the TLS RX tag.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_send_progress_parameters_sync(struct mlx5e_iq *iq,
    struct mlx5e_tls_rx_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_priv *priv;
	struct mlx5e_tx_psv_wqe *wqe;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}
	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIR_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = &mlx5e_tls_rx_send_progress_parameters_cb;
	iq->data[pi].arg = ptag;

	iq->pc += iq->data[pi].num_wqebbs;

	init_completion(&ptag->progress_complete);

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

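	/* wait for the completion callback, unless the device is in error */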
	while (1) {
		if (wait_for_completion_timeout(&ptag->progress_complete,
		    msecs_to_jiffies(1000)) != 0)
			break;
		priv = container_of(iq, struct mlx5e_channel, iq)->priv;
		if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
		    pci_channel_offline(priv->mdev->pdev) != 0)
			return (-EWOULDBLOCK);
	}

	return (0);	/* success */
}

CTASSERT(MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE >= MLX5_ST_SZ_BYTES(tls_progress_params));
CTASSERT(MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE <= PAGE_SIZE);

struct mlx5e_get_tls_progress_params_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_seg_get_psv	 psv;
};

static void
mlx5e_tls_rx_receive_progress_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_iq *iq;
	uint32_t tcp_curr_sn_he;
	uint32_t tcp_next_sn_he;
	uint64_t tls_rcd_num;
	void *buffer;

	ptag = (struct mlx5e_tls_rx_tag *)arg;
	buffer = mlx5e_tls_rx_get_progress_buffer(ptag);

	MLX5E_TLS_RX_TAG_LOCK(ptag);

	ptag->tcp_resync_pending = 0;

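	/*
	 * Only attempt a resync when the hardware record tracker is
	 * tracking and decryption is currently not offloaded:
	 */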
	switch (MLX5_GET(tls_progress_params, buffer, record_tracker_state)) {
	case MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING:
		break;
	default:
		goto done;
	}

	switch (MLX5_GET(tls_progress_params, buffer, auth_state)) {
	case MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD:
		break;
	default:
		goto done;
	}

	tcp_curr_sn_he = MLX5_GET(tls_progress_params, buffer, hw_resync_tcp_sn);

	if (mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(ptag, tcp_curr_sn_he,
	    &tcp_next_sn_he, &tls_rcd_num)) {
		MLX5_SET64(sw_tls_rx_cntx, ptag->crypto_params,
		    param.initial_record_number, tls_rcd_num);
		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params,
		    param.resync_tcp_sn, tcp_curr_sn_he);

		iq = mlx5e_tls_rx_get_iq(
		    container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx),
		    ptag->flowid, ptag->flowtype);

		if (mlx5e_tls_rx_send_static_parameters(iq, ptag) != 0)
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
	}
done:
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	m_snd_tag_rele(&ptag->tag);
}

/*
 * This function queries the hardware for the current state of the TIR
 * in question. It is typically called when encrypted data is received
 * to re-establish hardware decryption of received TLS data.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq,
    struct mlx5e_tls_rx_tag *ptag, mlx5e_iq_callback_t *cb)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u64 dma_address;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}

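	/*
	 * DMA map the progress buffer; the hardware writes the
	 * current PSV contents into it:
	 */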
	mlx5e_iq_load_memory_single(iq, pi,
	    mlx5e_tls_rx_get_progress_buffer(ptag),
	    MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE,
	    &dma_address, BUS_DMASYNC_PREREAD);

	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_GET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIR_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	wqe->psv.num_psv = 1 << 4;
	wqe->psv.l_key = iq->mkey_be;
	wqe->psv.psv_index[0] = cpu_to_be32(ptag->tirn);
	wqe->psv.va = cpu_to_be64(dma_address);

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = cb;
	iq->data[pi].arg = ptag;

	m_snd_tag_ref(&ptag->tag);

	iq->pc += iq->data[pi].num_wqebbs;

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

	return (0);	/* success */
}

/*
 * This is the import function for TLS RX tags.
 */
static int
mlx5e_tls_rx_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5_core_dev *mdev = arg;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS_RX,
		    mlx5_dev_domainset(mdev), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-rx-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_rx_work);
		store[i] = ptag;
	}
	return (i);
}

/*
 * This is the release function for TLS RX tags.
 */
static void
mlx5e_tls_rx_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_rx_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];

		flush_work(&ptag->work);
		mtx_destroy(&ptag->mtx);
		free(ptag, M_MLX5E_TLS_RX);
	}
}

/*
 * This is a convenience function to free TLS RX tags. It resets some
 * selected fields, updates the number of resources and returns the
 * TLS RX tag to the UMA pool of free tags.
 */
static void
mlx5e_tls_rx_tag_zfree(struct mlx5e_tls_rx_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_RX_ST_FREED;

	/* reset some variables */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;
	ptag->tirn = 0;
	ptag->flow_rule = NULL;
	ptag->tcp_resync_active = 0;
	ptag->tcp_resync_pending = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of resources in use */
	atomic_add_32(&ptag->tls_rx->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls_rx->zone, ptag);
}

/*
 * This function enables TLS RX support for the given NIC, if all
 * needed firmware capabilities are present.
 */
int
mlx5e_tls_rx_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls_rx *ptls = &priv->tls_rx;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_rx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0 ||
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-rx-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls_rx", device_get_unit(priv->mdev->pdev->dev.bsddev));

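	/* create UMA cache zone backed by the import/release functions above */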
	ptls->zone = uma_zcache_create(ptls->zname,
	    sizeof(struct mlx5e_tls_rx_tag), NULL, NULL, NULL, NULL,
	    mlx5e_tls_rx_tag_import, mlx5e_tls_rx_tag_release, priv->mdev,
	    UMA_ZONE_UNMANAGED);

	/* shared between RX and TX TLS */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_RX_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls_rx", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS receive offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_rx_stats_desc, MLX5E_TLS_RX_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

/*
 * This function disables TLS RX support for the given NIC.
 */
void
mlx5e_tls_rx_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls_rx *ptls = &priv->tls_rx;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls_rx.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_RX_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

/*
 * This function is used to serialize sleeping firmware operations
 * needed in order to establish and destroy a TLS RX tag.
 */
static void
mlx5e_tls_rx_work(struct work_struct *work)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_rx_tag, work);
	priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);

	switch (ptag->state) {
	case MLX5E_TLS_RX_ST_INIT:
		/* try to allocate new TIR context */
		err = mlx5_tls_open_tir(priv->mdev, priv->tdn,
		    priv->channel[mlx5e_tls_rx_get_ch(priv, ptag->flowid, ptag->flowtype)].rqtn,
		    &ptag->tirn);
		if (err) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
			break;
		}
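		/* the progress params "pd" field holds the TIR number */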
		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.pd, ptag->tirn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
		    MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_rx_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_RX_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_RX_ST_INIT)
			ptag->state = MLX5E_TLS_RX_ST_SETUP;
		MLX5E_TLS_RX_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_RX_ST_RELEASE:
		/* remove flow rule for incoming traffic, if any */
		if (ptag->flow_rule != NULL)
			mlx5e_accel_fs_del_inpcb(ptag->flow_rule);

		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* try to destroy TIR context by ID */
		if (ptag->tirn != 0)
			mlx5_tls_close_tir(priv->mdev, ptag->tirn);

		/* free tag */
		mlx5e_tls_rx_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

/*
 * This function translates the crypto parameters into the format used
 * by the firmware and hardware. Currently only AES-128 and AES-256
 * are supported for TLS v1.2 and TLS v1.3.
 *
 * Returns zero on success, else an error happened.
 */
static int
mlx5e_tls_rx_set_params(void *ctx, struct inpcb *inp, const struct tls_session_params *en)
{
	uint32_t tcp_sn_he;
	uint64_t tls_sn_he;

	MLX5_SET(sw_tls_rx_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_rx_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_rx_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_rx_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_rx_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_rx_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_rx_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_rx_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}

	if (__predict_false(inp == NULL ||
	    ktls_get_rx_sequence(inp, &tcp_sn_he, &tls_sn_he) != 0))
		return (EINVAL);

	MLX5_SET64(sw_tls_rx_cntx, ctx, param.initial_record_number, tls_sn_he);
	MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, 0);
	MLX5_SET(sw_tls_rx_cntx, ctx, progress.next_record_tcp_sn, tcp_sn_he);

	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_RX_ST_INIT == 0);

/*
 * This function is responsible for allocating a TLS RX tag. It is a
 * callback function invoked by the network stack.
 *
 * Returns zero on success, else an error happened.
 */
int
mlx5e_tls_rx_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct mlx5e_iq *iq;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5_flow_handle *flow_rule;
	const struct tls_session_params *en;
	uint32_t value;
	int error;

	priv = if_getsoftc(ifp);

	if (unlikely(priv->gone != 0 || priv->tls_rx.init == 0 ||
	    params->hdr.flowtype == M_HASHTYPE_NONE))
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls_rx.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS RX tag */
	ptag->tls_rx = &priv->tls_rx;
	ptag->flowtype = params->hdr.flowtype;
	ptag->flowid = params->hdr.flowid;

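	/* reserve one resource slot; given back by mlx5e_tls_rx_tag_zfree() */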
	value = atomic_fetchadd_32(&priv->tls_rx.num_resources, 1U);

	/* check resource limits */
	if (value >= priv->tls_rx.max_resources) {
		error = ENOMEM;
		goto failure;
	}

	en = &params->tls_rx.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	     )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_rx_set_params(
			    ptag->crypto_params, params->tls_rx.inp, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_rx_set_params(
			    ptag->crypto_params, params->tls_rx.inp, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, &mlx5e_tls_rx_snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_RX_ST_INIT;

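	/* perform the firmware setup (TIR and DEK allocation) synchronously */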
	queue_work(priv->tls_rx.wq, &ptag->work);
	flush_work(&ptag->work);

	/* check that worker task completed successfully */
	MLX5E_TLS_RX_TAG_LOCK(ptag);
	if (ptag->state == MLX5E_TLS_RX_ST_SETUP) {
		ptag->state = MLX5E_TLS_RX_ST_READY;
		error = 0;
	} else {
		error = ENOMEM;
	}
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	if (unlikely(error))
		goto cleanup;

	iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);

	/* establish connection between DEK and TIR */
	if (mlx5e_tls_rx_send_static_parameters(iq, ptag) != 0) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.auth_state,
	    MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
	MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.record_tracker_state,
	    MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);

	/* reset state to all zeros */
	if (mlx5e_tls_rx_send_progress_parameters_sync(iq, ptag) != 0) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	if (if_getpcp(ifp) != IFNET_PCP_NONE || params->tls_rx.vlan_id != 0) {
		/* create flow rule for TLS RX traffic (tagged) */
		flow_rule = mlx5e_accel_fs_add_inpcb(priv, params->tls_rx.inp,
		    ptag->tirn, MLX5_FS_DEFAULT_FLOW_TAG, params->tls_rx.vlan_id);
	} else {
		/* create flow rule for TLS RX traffic (untagged) */
		flow_rule = mlx5e_accel_fs_add_inpcb(priv, params->tls_rx.inp,
		    ptag->tirn, MLX5_FS_DEFAULT_FLOW_TAG, MLX5E_ACCEL_FS_ADD_INPCB_NO_VLAN);
	}

	if (IS_ERR_OR_NULL(flow_rule)) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	ptag->flow_rule = flow_rule;
	init_completion(&ptag->progress_complete);

	return (0);

cleanup:
	m_snd_tag_rele(&ptag->tag);
	return (error);

failure:
	mlx5e_tls_rx_tag_zfree(ptag);
	return (error);
}

/*
 * This function adds the TCP sequence number and TLS record number in
 * host endian format to a small database. When TLS records have the
 * same length, they are simply accumulated by counting instead of
 * being stored as separate entries in the TLS database. The dimension
 * of the database is such that it cannot store more than 1GByte of
 * continuous TCP data to avoid issues with TCP sequence number wrap
 * around. A record length of zero bytes is special and means that the
 * resync completed and all data in the database can be discarded.
 * This function is called after the TCP stack has re-assembled all
 * TCP fragments due to out of order packet reception and all TCP
 * sequence numbers should be sequential.
 *
 * This function returns true if a so-called TLS RX resync operation
 * is in progress, else false.
 */
static bool
mlx5e_tls_rx_snd_tag_add_tcp_sequence(struct mlx5e_tls_rx_tag *ptag,
    uint32_t tcp_sn_he, uint32_t len, uint64_t tls_rcd)
{
	uint16_t i, j, n;

	if (ptag->tcp_resync_active == 0 ||
	    ptag->tcp_resync_next != tcp_sn_he ||
	    len == 0) {
		/* start over again or terminate */
		ptag->tcp_resync_active = (len != 0);
		ptag->tcp_resync_len[0] = len;
		ptag->tcp_resync_num[0] = 1;
		ptag->tcp_resync_pc = (len != 0);
		ptag->tcp_resync_cc = 0;
		ptag->tcp_resync_start = tcp_sn_he;
		ptag->rcd_resync_start = tls_rcd;
	} else {
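		/* get index of last entry and current number of entries */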
		i = (ptag->tcp_resync_pc - 1) & (MLX5E_TLS_RX_RESYNC_MAX - 1);
		n = ptag->tcp_resync_pc - ptag->tcp_resync_cc;

		/* check if the length is the same as last time */
		if (ptag->tcp_resync_len[i] == len &&
		    ptag->tcp_resync_num[i] != MLX5E_TLS_RX_NUM_MAX) {
			/* use existing entry */
			ptag->tcp_resync_num[i]++;
		} else if (n == MLX5E_TLS_RX_RESYNC_MAX) {
			j = ptag->tcp_resync_cc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* adjust starting TLS record number and TCP sequence number */
			ptag->rcd_resync_start += ptag->tcp_resync_num[j];
			ptag->tcp_resync_start += ptag->tcp_resync_len[j] * ptag->tcp_resync_num[j];
			i = ptag->tcp_resync_pc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* store new entry */
			ptag->tcp_resync_len[i] = len;
			ptag->tcp_resync_num[i] = 1;
		} else {
			i = ptag->tcp_resync_pc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* add new entry */
			ptag->tcp_resync_len[i] = len;
			ptag->tcp_resync_num[i] = 1;
		}
	}

	/* store next TCP SN in host endian format */
	ptag->tcp_resync_next = tcp_sn_he + len;

	return (ptag->tcp_resync_active);
}

/*
 * This function checks if the given TCP sequence number points to the
 * beginning of a valid TLS header.
 *
 * Returns true if a match is found, else false.
 */
static bool
mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(struct mlx5e_tls_rx_tag *ptag,
    uint32_t tcp_sn_he, uint32_t *p_next_tcp_sn_he, uint64_t *p_tls_rcd)
{
	uint16_t i, j;
	uint32_t off = 0;
	uint32_t rcd = 0;
	uint32_t delta;
	uint32_t leap;

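	/* walk the database, accumulating TCP byte offset and record count */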
	for (i = ptag->tcp_resync_cc; i != ptag->tcp_resync_pc; i++) {
		delta = tcp_sn_he - off - ptag->tcp_resync_start;

		/* check if subtraction went negative */
		if ((int32_t)delta < 0)
			break;

		j = i & (MLX5E_TLS_RX_RESYNC_MAX - 1);
		leap = ptag->tcp_resync_len[j] * ptag->tcp_resync_num[j];
		if (delta < leap) {
			if ((delta % ptag->tcp_resync_len[j]) == 0) {
				*p_next_tcp_sn_he = tcp_sn_he +
				    ptag->tcp_resync_len[j];
				*p_tls_rcd = ptag->rcd_resync_start +
				    (uint64_t)rcd +
				    (uint64_t)(delta / ptag->tcp_resync_len[j]);
				return (true);		/* success */
			}
			break;	/* invalid offset */
		}
		rcd += ptag->tcp_resync_num[j];
		off += leap;
	}
	return (false);	/* not found */
}

/*
 * This is a callback function from the network stack to keep track of
 * TLS RX TCP sequence numbers.
 *
 * Returns zero on success, else an error happened.
 */
static int
mlx5e_tls_rx_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_iq *iq;
	int err;

	ptag = container_of(pmt, struct mlx5e_tls_rx_tag, tag);
	priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);

	if (unlikely(priv->gone != 0))
		return (ENXIO);

	iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);

	MLX5E_TLS_RX_TAG_LOCK(ptag);

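	/*
	 * Record the TLS header position. If a resync is needed and
	 * none is pending, query the hardware progress parameters:
	 */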
	if (mlx5e_tls_rx_snd_tag_add_tcp_sequence(ptag,
	    params->tls_rx.tls_hdr_tcp_sn,
	    params->tls_rx.tls_rec_length,
	    params->tls_rx.tls_seq_number) &&
	    ptag->tcp_resync_pending == 0) {
		err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag,
		    &mlx5e_tls_rx_receive_progress_parameters_cb);
		if (err != 0) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_err, 1);
		} else {
			ptag->tcp_resync_pending = 1;
			MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_ok, 1);
		}
	} else {
		err = 0;
	}
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	return (-err);
}

/*
 * This function frees a TLS RX tag in a non-blocking way.
 */
static void
mlx5e_tls_rx_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_rx_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_rx_tag, tag);
	struct mlx5e_priv *priv;

	MLX5E_TLS_RX_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_RX_ST_RELEASE;
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	priv = if_getsoftc(ptag->tag.ifp);
	queue_work(priv->tls_rx.wq, &ptag->work);
}

static void
mlx5e_tls_rx_str_status_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;

	ptag = (struct mlx5e_tls_rx_tag *)arg;
	complete_all(&ptag->progress_complete);
	m_snd_tag_rele(&ptag->tag);
}

static int
mlx5e_tls_rx_snd_tag_status_str(struct m_snd_tag *pmt, char *buf, size_t *sz)
{
	int err, out_size;
	struct mlx5e_iq *iq;
	void *buffer;
	uint32_t tracker_state_val;
	uint32_t auth_state_val;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_rx_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_rx_tag, tag);

	if (buf == NULL)
		return (0);

	MLX5E_TLS_RX_TAG_LOCK(ptag);
	priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);
	iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);
	reinit_completion(&ptag->progress_complete);
	err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag,
	    &mlx5e_tls_rx_str_status_cb);
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);
	if (err != 0)
		return (err);

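	/* wait for the status callback, unless the device is in error */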
	for (;;) {
		if (wait_for_completion_timeout(&ptag->progress_complete,
		    msecs_to_jiffies(1000)) != 0)
			break;
		if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
		    pci_channel_offline(priv->mdev->pdev) != 0)
			return (ENXIO);
	}
	buffer = mlx5e_tls_rx_get_progress_buffer(ptag);
	tracker_state_val = MLX5_GET(tls_progress_params, buffer,
	    record_tracker_state);
	auth_state_val = MLX5_GET(tls_progress_params, buffer, auth_state);

	/* Validate tracker state value is in range */
	if (tracker_state_val >
	    MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING)
		return (EINVAL);

	/* Validate auth state value is in range */
	if (auth_state_val >
	    MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION)
		return (EINVAL);

	out_size = snprintf(buf, *sz, "tracker_state: %s, auth_state: %s",
	    mlx5e_tls_rx_progress_params_record_tracker_state_str[
		tracker_state_val],
	    mlx5e_tls_rx_progress_params_auth_state_str[auth_state_val]);

	if (out_size <= *sz)
		*sz = out_size;
	return (0);
}

#else

int
mlx5e_tls_rx_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_rx_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif