xref: /linux/drivers/net/ethernet/netronome/nfp/crypto/tls.c (revision b5d9a834f4fd1b6abfa527ec351c871084dd23a3)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/ipv6.h>
6 #include <linux/skbuff.h>
7 #include <net/tls.h>
8 
9 #include "../ccm.h"
10 #include "../nfp_net.h"
11 #include "crypto.h"
12 #include "fw.h"
13 
/* All CCM mailbox command types that the FW must support for TLS offload
 * to be usable (checked against tlv_caps.mbox_cmsg_types in
 * nfp_net_tls_init()).
 */
#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

/* Crypto-ops capability bit required for RX (decrypt) offload */
#define NFP_NET_TLS_OPCODE_MASK_RX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

/* Crypto-ops capability bit required for TX (encrypt) offload */
#define NFP_NET_TLS_OPCODE_MASK_TX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

/* Any TLS opcode — offload is initialized if at least one is supported */
#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
28 
29 static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
30 {
31 	u32 off, val;
32 
33 	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
34 
35 	val = nn_readl(nn, off);
36 	if (on)
37 		val |= BIT(opcode & 31);
38 	else
39 		val &= ~BIT(opcode & 31);
40 	nn_writel(nn, off, val);
41 }
42 
43 static bool
44 __nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
45 			       enum tls_offload_ctx_dir direction)
46 {
47 	u8 opcode;
48 	int cnt;
49 
50 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
51 		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
52 		nn->ktls_tx_conn_cnt += add;
53 		cnt = nn->ktls_tx_conn_cnt;
54 		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
55 	} else {
56 		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
57 		nn->ktls_rx_conn_cnt += add;
58 		cnt = nn->ktls_rx_conn_cnt;
59 	}
60 
61 	/* Care only about 0 -> 1 and 1 -> 0 transitions */
62 	if (cnt > 1)
63 		return false;
64 
65 	nfp_net_crypto_set_op(nn, opcode, cnt);
66 	return true;
67 }
68 
69 static int
70 nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
71 			     enum tls_offload_ctx_dir direction)
72 {
73 	int ret = 0;
74 
75 	/* Use the BAR lock to protect the connection counts */
76 	nn_ctrl_bar_lock(nn);
77 	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
78 		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
79 		/* Undo the cnt adjustment if failed */
80 		if (ret)
81 			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
82 	}
83 	nn_ctrl_bar_unlock(nn);
84 
85 	return ret;
86 }
87 
/* Account for one new offloaded connection in @direction.
 * Returns 0 on success or a negative errno from the device reconfig.
 */
static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}
93 
/* Account for the removal of one offloaded connection in @direction.
 * Returns 0 on success or a negative errno from the device reconfig.
 */
static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}
99 
/* Allocate a CCM mailbox message skb with @req_sz bytes of request space
 * and room for a struct nfp_crypto_reply_simple reply.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *
nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
{
	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
				      sizeof(struct nfp_crypto_reply_simple),
				      flags);
}
107 
108 static int
109 nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
110 			       const char *name, enum nfp_ccm_type type)
111 {
112 	struct nfp_crypto_reply_simple *reply;
113 	int err;
114 
115 	err = __nfp_ccm_mbox_communicate(nn, skb, type,
116 					 sizeof(*reply), sizeof(*reply),
117 					 type == NFP_CCM_TYPE_CRYPTO_DEL);
118 	if (err) {
119 		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
120 		return err;
121 	}
122 
123 	reply = (void *)skb->data;
124 	err = -be32_to_cpu(reply->error);
125 	if (err)
126 		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
127 			   name, err);
128 	dev_consume_skb_any(skb);
129 
130 	return err;
131 }
132 
133 static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
134 {
135 	struct nfp_crypto_req_del *req;
136 	struct sk_buff *skb;
137 
138 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
139 	if (!skb)
140 		return;
141 
142 	req = (void *)skb->data;
143 	req->ep_id = 0;
144 	memcpy(req->handle, fw_handle, sizeof(req->handle));
145 
146 	nfp_net_tls_communicate_simple(nn, skb, "delete",
147 				       NFP_CCM_TYPE_CRYPTO_DEL);
148 }
149 
150 static void
151 nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
152 {
153 	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
154 					FIELD_PREP(NFP_NET_TLS_VLAN,
155 						   NFP_NET_TLS_VLAN_UNUSED));
156 }
157 
158 static void
159 nfp_net_tls_assign_conn_id(struct nfp_net *nn,
160 			   struct nfp_crypto_req_add_front *front)
161 {
162 	u32 len;
163 	u64 id;
164 
165 	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
166 	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
167 
168 	memcpy(front->l3_addrs, &id, sizeof(id));
169 	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
170 }
171 
172 static struct nfp_crypto_req_add_back *
173 nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
174 		     struct sock *sk, int direction)
175 {
176 	struct inet_sock *inet = inet_sk(sk);
177 
178 	req->front.key_len += sizeof(__be32) * 2;
179 
180 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
181 		nfp_net_tls_assign_conn_id(nn, &req->front);
182 	} else {
183 		req->src_ip = inet->inet_daddr;
184 		req->dst_ip = inet->inet_saddr;
185 	}
186 
187 	return &req->back;
188 }
189 
190 static struct nfp_crypto_req_add_back *
191 nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
192 		     struct sock *sk, int direction)
193 {
194 #if IS_ENABLED(CONFIG_IPV6)
195 	struct ipv6_pinfo *np = inet6_sk(sk);
196 
197 	req->front.key_len += sizeof(struct in6_addr) * 2;
198 
199 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
200 		nfp_net_tls_assign_conn_id(nn, &req->front);
201 	} else {
202 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
203 		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
204 	}
205 
206 #endif
207 	return &req->back;
208 }
209 
210 static void
211 nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
212 		   struct nfp_crypto_req_add_back *back, struct sock *sk,
213 		   int direction)
214 {
215 	struct inet_sock *inet = inet_sk(sk);
216 
217 	front->l4_proto = IPPROTO_TCP;
218 
219 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
220 		back->src_port = 0;
221 		back->dst_port = 0;
222 	} else {
223 		back->src_port = inet->inet_dport;
224 		back->dst_port = inet->inet_sport;
225 	}
226 }
227 
228 static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
229 {
230 	switch (direction) {
231 	case TLS_OFFLOAD_CTX_DIR_TX:
232 		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
233 	case TLS_OFFLOAD_CTX_DIR_RX:
234 		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
235 	default:
236 		WARN_ON_ONCE(1);
237 		return 0;
238 	}
239 }
240 
241 static bool
242 nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
243 			 enum tls_offload_ctx_dir direction)
244 {
245 	u8 bit;
246 
247 	switch (cipher_type) {
248 	case TLS_CIPHER_AES_GCM_128:
249 		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
250 			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
251 		else
252 			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
253 		break;
254 	default:
255 		return false;
256 	}
257 
258 	return nn->tlv_caps.crypto_ops & BIT(bit);
259 }
260 
/* tls_dev_add callback — install a kTLS connection in the FW.
 *
 * Builds an ADD request (v4 or v6 layout), sends it over the CCM mailbox,
 * and on success stores the FW handle in the socket's driver TLS context.
 * Returns 0 on success or a negative errno; all failure paths undo the
 * connection accounting, and a FW-side add that returned a handle we then
 * reject is deleted again.
 */
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	void *req;
	bool ipv6;
	int err;

	/* Our per-connection context must fit in the space the TLS core
	 * reserves for driver state (RX only needs the part up to rx_end).
	 */
	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		/* v4-mapped v6 sockets are handled as IPv4 below */
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
#endif
		/* fall through */
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Count the connection first; this may enable crypto on the NIC */
	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	/* Fill the common front of the request */
	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	/* Address part differs by IP version; both return the common tail */
	req = (void *)skb->data;
	if (ipv6)
		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
	else
		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	/* Copy key material; unused key bytes are zeroed */
	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err) {
		if (err == -ENOSPC) {
			/* Only log "table full" once per full episode */
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	/* Stash the FW handle (and, for TX, the next expected seq) in the
	 * TLS core's driver context for this socket/direction.
	 */
	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	/* RX: let the core drive resync via next-record-hint */
	tls_offload_rx_resync_set_type(sk,
				       TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}
392 
/* tls_dev_del callback — drop the connection count (possibly disabling
 * crypto on the NIC) and ask the FW to delete the connection identified
 * by the handle stored in the driver TLS context.
 */
static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}
405 
/* tls_dev_resync callback — update the FW's TCP sequence / record number
 * state for a connection.  TX resyncs wait for the FW reply and update
 * next_seq on success; RX resyncs are posted asynchronously (fire and
 * forget).  NOTE(review): the GFP_ATOMIC choice suggests RX resync can be
 * called from a non-sleeping context — confirm against the TLS core.
 */
static int
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	struct sk_buff *skb;
	gfp_t flags;
	int err;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return -ENOMEM;

	/* Build the UPDATE request from the stored FW handle */
	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
						     NFP_CCM_TYPE_CRYPTO_UPDATE);
		if (err)
			return err;
		ntls->next_seq = seq;
	} else {
		/* RX: post without waiting for the reply */
		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
				  sizeof(struct nfp_crypto_reply_simple));
	}

	return 0;
}
444 
/* kTLS offload callbacks registered via netdev->tlsdev_ops */
static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};
450 
/* Send a CRYPTO_RESET command to the FW, clearing any previously
 * installed connections.  Returns 0 or a negative errno.
 */
static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}
466 
/* Probe-time TLS offload setup.
 *
 * Silently skips offload (returns 0) when the FW lacks the crypto ops,
 * the required mailbox command types, or a large enough mailbox.
 * Otherwise resets FW crypto state, disables all crypto ops on the
 * device, and advertises the NETIF_F_HW_TLS_* features the FW supports.
 * Returns 0 on success or a negative errno on reset/reconfig failure.
 */
int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	/* All mailbox command types must be present, not just some */
	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	/* The largest request (IPv6 add) must fit in the mailbox */
	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	/* Start with all crypto ops disabled; they are enabled on demand
	 * by the connection-count 0 -> 1 transitions.
	 */
	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}
509