xref: /linux/drivers/net/ethernet/netronome/nfp/crypto/tls.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/ipv6.h>
6 #include <linux/skbuff.h>
7 #include <linux/string.h>
8 #include <net/inet6_hashtables.h>
9 #include <net/tls.h>
10 
11 #include "../ccm.h"
12 #include "../nfp_net.h"
13 #include "crypto.h"
14 #include "fw.h"
15 
/* All four crypto mailbox command types must be supported by the FW for
 * TLS offload to be enabled (checked in nfp_net_tls_init()).
 */
#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

/* Crypto capability bit for RX offload (TLS 1.2 AES-GCM-128 decrypt) */
#define NFP_NET_TLS_OPCODE_MASK_RX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

/* Crypto capability bit for TX offload (TLS 1.2 AES-GCM-128 encrypt) */
#define NFP_NET_TLS_OPCODE_MASK_TX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

/* Any TLS opcode - offload is considered at all if any of these is set */
#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
31 static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
32 {
33 	u32 off, val;
34 
35 	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
36 
37 	val = nn_readl(nn, off);
38 	if (on)
39 		val |= BIT(opcode & 31);
40 	else
41 		val &= ~BIT(opcode & 31);
42 	nn_writel(nn, off, val);
43 }
44 
/* Adjust the per-direction kTLS connection count by @add.  Caller must
 * hold the control BAR lock (see nfp_net_tls_conn_cnt_changed()).
 *
 * Return: true when the device enable bit for the matching crypto opcode
 * was toggled, i.e. a FW reconfig is required.
 */
static bool
__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			       enum tls_offload_ctx_dir direction)
{
	u8 opcode;
	int cnt;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		nn->ktls_tx_conn_cnt += add;
		cnt = nn->ktls_tx_conn_cnt;
		/* Mirror "any TX connections" into the datapath state */
		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
	} else {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		nn->ktls_rx_conn_cnt += add;
		cnt = nn->ktls_rx_conn_cnt;
	}

	/* Care only about 0 -> 1 and 1 -> 0 transitions */
	if (cnt > 1)
		return false;

	/* cnt is 0 or 1 here, so it doubles as the on/off flag */
	nfp_net_crypto_set_op(nn, opcode, cnt);
	return true;
}
70 
71 static int
72 nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
73 			     enum tls_offload_ctx_dir direction)
74 {
75 	int ret = 0;
76 
77 	/* Use the BAR lock to protect the connection counts */
78 	nn_ctrl_bar_lock(nn);
79 	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
80 		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
81 		/* Undo the cnt adjustment if failed */
82 		if (ret)
83 			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
84 	}
85 	nn_ctrl_bar_unlock(nn);
86 
87 	return ret;
88 }
89 
/* Account for one more offloaded connection in @direction, enabling the
 * crypto op in the device on the 0 -> 1 transition.
 */
static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}
95 
/* Account for one fewer offloaded connection in @direction, disabling
 * the crypto op in the device on the 1 -> 0 transition.
 */
static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}
101 
102 static struct sk_buff *
103 nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
104 {
105 	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
106 				      sizeof(struct nfp_crypto_reply_simple),
107 				      flags);
108 }
109 
/* Send @skb as a crypto command of @type over the CCM mailbox and parse
 * the reply as a struct nfp_crypto_reply_simple.  @name is used only
 * for log messages.
 *
 * Return: 0 on success, a negative errno if the mailbox exchange failed,
 * or the negated FW error code from the reply.  @skb is consumed on
 * success and FW-error; on communication error it is presumably freed
 * by __nfp_ccm_mbox_communicate() (cf. the "communicate frees skb on
 * error" comment in nfp_net_tls_add()).
 */
static int
nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
			       const char *name, enum nfp_ccm_type type)
{
	struct nfp_crypto_reply_simple *reply;
	int err;

	/* Last argument flags DEL commands - NOTE(review): presumably
	 * marks them "critical"/may-not-be-dropped, verify against ccm.h.
	 */
	err = __nfp_ccm_mbox_communicate(nn, skb, type,
					 sizeof(*reply), sizeof(*reply),
					 type == NFP_CCM_TYPE_CRYPTO_DEL);
	if (err) {
		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
		return err;
	}

	/* Reply overwrites the request in the same skb */
	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err)
		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
			   name, err);
	dev_consume_skb_any(skb);

	return err;
}
134 
/* Ask the FW to delete the connection state identified by @fw_handle.
 * Best effort: allocation failure is silently ignored and FW errors are
 * only logged (by nfp_net_tls_communicate_simple()).
 */
static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
{
	struct nfp_crypto_req_del *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;

	req = (void *)skb->data;
	req->ep_id = 0;	/* ep_id is always 0 throughout this driver */
	memcpy(req->handle, fw_handle, sizeof(req->handle));

	nfp_net_tls_communicate_simple(nn, skb, "delete",
				       NFP_CCM_TYPE_CRYPTO_DEL);
}
151 
152 static void
153 nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
154 {
155 	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
156 					FIELD_PREP(NFP_NET_TLS_VLAN,
157 						   NFP_NET_TLS_VLAN_UNUSED));
158 }
159 
/* Write a unique 64-bit connection ID into the variable-length address
 * area of an add request (the bytes past NFP_NET_TLS_NON_ADDR_KEY_LEN)
 * and zero the remainder of that area.  Used for TX connections, which
 * carry no real addresses.  front->key_len must already include the
 * address bytes (set by the nfp_net_tls_set_ipv[46]() callers).
 */
static void
nfp_net_tls_assign_conn_id(struct nfp_net *nn,
			   struct nfp_crypto_req_add_front *front)
{
	u32 len;
	u64 id;

	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;

	memcpy(front->l3_addrs, &id, sizeof(id));
	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
}
173 
174 static struct nfp_crypto_req_add_back *
175 nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
176 		     struct sock *sk, int direction)
177 {
178 	struct inet_sock *inet = inet_sk(sk);
179 
180 	req->front.key_len += sizeof(__be32) * 2;
181 
182 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
183 		nfp_net_tls_assign_conn_id(nn,
184 			container_of(&req->front,
185 				     struct nfp_crypto_req_add_front, __hdr));
186 	} else {
187 		req->src_ip = inet->inet_daddr;
188 		req->dst_ip = inet->inet_saddr;
189 	}
190 
191 	return &req->back;
192 }
193 
194 static struct nfp_crypto_req_add_back *
195 nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
196 		     struct sock *sk, int direction)
197 {
198 #if IS_ENABLED(CONFIG_IPV6)
199 	struct ipv6_pinfo *np = inet6_sk(sk);
200 
201 	req->front.key_len += sizeof(struct in6_addr) * 2;
202 
203 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
204 		nfp_net_tls_assign_conn_id(nn,
205 			container_of(&req->front,
206 				     struct nfp_crypto_req_add_front, __hdr));
207 	} else {
208 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
209 		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
210 	}
211 
212 #endif
213 	return &req->back;
214 }
215 
216 static void
217 nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
218 		   struct nfp_crypto_req_add_back *back, struct sock *sk,
219 		   int direction)
220 {
221 	struct inet_sock *inet = inet_sk(sk);
222 
223 	front->l4_proto = IPPROTO_TCP;
224 
225 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
226 		back->src_port = 0;
227 		back->dst_port = 0;
228 	} else {
229 		back->src_port = inet->inet_dport;
230 		back->dst_port = inet->inet_sport;
231 	}
232 }
233 
234 static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
235 {
236 	switch (direction) {
237 	case TLS_OFFLOAD_CTX_DIR_TX:
238 		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
239 	case TLS_OFFLOAD_CTX_DIR_RX:
240 		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
241 	default:
242 		WARN_ON_ONCE(1);
243 		return 0;
244 	}
245 }
246 
247 static bool
248 nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
249 			 enum tls_offload_ctx_dir direction)
250 {
251 	u8 bit;
252 
253 	switch (cipher_type) {
254 	case TLS_CIPHER_AES_GCM_128:
255 		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
256 			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
257 		else
258 			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
259 		break;
260 	default:
261 		return false;
262 	}
263 
264 	return nn->tlv_caps.crypto_ops & BIT(bit);
265 }
266 
/* ->tls_dev_add() callback: program a new kTLS connection into the FW.
 *
 * Builds a CRYPTO_ADD mailbox request (IPv4 or IPv6 layout), sends it,
 * wipes the key material from the message memory, and stores the FW
 * handle returned in the reply in the socket's TLS driver context.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported cipher/family,
 * -ENOMEM / other negative errno on failure.
 */
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	void *req;
	bool ipv6;
	int err;

	/* Driver context must fit the space the TLS core reserves */
	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		/* v4-mapped v6 sockets fall through to the IPv4 layout */
		if (ipv6_only_sock(sk) ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
		fallthrough;
#endif
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Bump the connection count first; rolled back on any failure */
	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	/* Fill addresses (or conn ID for TX); the helpers also return
	 * where the fixed tail of this request layout lives.
	 */
	req = (void *)skb->data;
	if (ipv6)
		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
	else
		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	/* back->key is an array of 32-bit words, hence the / 4 index */
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	/* Get an extra ref on the skb so we can wipe the key after */
	skb_get(skb);

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	reply = (void *)skb->data;

	/* We depend on CCM MBOX code not reallocating skb we sent
	 * so we can clear the key material out of the memory.
	 */
	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
			  (u8 *)back > skb_end_pointer(skb)) &&
	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
		memzero_explicit(back, sizeof(*back));
	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */

	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	err = -be32_to_cpu(reply->error);
	if (err) {
		/* Log the table-full condition only on first occurrence */
		if (err == -ENOSPC) {
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	/* Without FW stream-scan resync support, have the TLS core track
	 * the next-record hint for RX resync.
	 */
	if (!nn->tlv_caps.tls_resync_ss)
		tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);

	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}
412 
/* ->tls_dev_del() callback: drop the connection count (disabling the
 * crypto op in the device on the last connection) and ask the FW to
 * delete its state for this connection (best effort).
 */
static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}
425 
/* ->tls_dev_resync() callback: push a new record number / TCP sequence
 * pair for the connection to the FW.  TX resync is synchronous (waits
 * for the FW reply); RX resync is posted fire-and-forget, as it may be
 * invoked from a context that must not sleep (hence GFP_ATOMIC).
 *
 * Return: 0 on success or a negative errno.
 */
static int
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	enum nfp_ccm_type type;
	struct sk_buff *skb;
	gfp_t flags;
	int err;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return -ENOMEM;

	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	type = NFP_CCM_TYPE_CRYPTO_UPDATE;
	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
		if (err)
			return err;
		/* Only advance our view of the sequence after FW accepted */
		ntls->next_seq = seq;
	} else {
		/* FW with stream-scan support uses a dedicated RESYNC type */
		if (nn->tlv_caps.tls_resync_ss)
			type = NFP_CCM_TYPE_CRYPTO_RESYNC;
		/* Post without waiting for the reply */
		nfp_ccm_mbox_post(nn, skb, type,
				  sizeof(struct nfp_crypto_reply_simple));
		atomic_inc(&nn->ktls_rx_resync_sent);
	}

	return 0;
}
468 
/* kTLS callbacks registered with the TLS core via netdev->tlsdev_ops
 * (see nfp_net_tls_init()).
 */
static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};
474 
/* Handle a TLS RX resync request generated by the FW: validate the
 * packet header offsets in @req, look up the matching established TCP
 * socket, and ask the TLS core to resync at the carried TCP sequence.
 * Ignored requests (no socket, not offloaded, handle mismatch) bump the
 * ktls_rx_resync_ign counter.
 *
 * Return: 0 on success or legitimate ignore, -EINVAL on a malformed
 * request.
 */
int nfp_net_tls_rx_resync_req(struct net_device *netdev,
			      struct nfp_net_tls_resync_req *req,
			      void *pkt, unsigned int pkt_len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct net *net = dev_net(netdev);
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	struct iphdr *iph;
	struct sock *sk;
	__be32 tcp_seq;
	int err;

	/* iph and ipv6h alias the same bytes; the version nibble decides
	 * below which view is the real one.
	 */
	iph = pkt + req->l3_offset;
	ipv6h = pkt + req->l3_offset;
	th = pkt + req->l4_offset;

	/* The whole TCP header must lie within the packet */
	if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
				 req->l3_offset, req->l4_offset, pkt_len);
		err = -EINVAL;
		goto err_cnt_ign;
	}

	switch (ipv6h->version) {
	case 4:
		sk = inet_lookup_established(net, iph->saddr, th->source,
					     iph->daddr, th->dest,
					     netdev->ifindex);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case 6:
		sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
		break;
#endif
	default:
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
				 req->l3_offset, req->l4_offset, iph->version);
		err = -EINVAL;
		goto err_cnt_ign;
	}

	err = 0;
	if (!sk)
		goto err_cnt_ign;
	if (!tls_is_sk_rx_device_offloaded(sk) ||
	    sk->sk_shutdown & RCV_SHUTDOWN)
		goto err_put_sock;

	ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
	/* some FW versions can't report the handle and report 0s */
	if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
	    memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
		goto err_put_sock;

	/* copy to ensure alignment */
	memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
	tls_offload_rx_resync_request(sk, tcp_seq);
	atomic_inc(&nn->ktls_rx_resync_req);

	sock_gen_put(sk);
	return 0;

err_put_sock:
	sock_gen_put(sk);
err_cnt_ign:
	atomic_inc(&nn->ktls_rx_resync_ign);
	return err;
}
547 
/* Issue a CRYPTO_RESET command to the FW - presumably clears all FW
 * connection state; called once before enabling offload in
 * nfp_net_tls_init().
 */
static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;	/* ep_id is always 0 throughout this driver */

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}
563 
/* Probe-time setup of kTLS offload: verify FW capabilities, reset FW
 * crypto state, disable all crypto ops, and advertise the netdev TLS
 * features.  Returns 0 (without registering the ops) when the device
 * simply does not support offload; a negative errno on real failures.
 */
int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	/* All four crypto mailbox command types must be available */
	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	/* The v6 add request is the largest message we will send */
	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	/* Start with every op disabled; ops are enabled on demand when
	 * the first connection in a direction is added.
	 */
	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}
606