// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/debugfs.h>
#include "en.h"
#include "lib/mlx5.h"
#include "lib/crypto.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

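/* Import raw TLS key material into a hardware DEK object allocated from
 * @dek_pool. Only AES-GCM-128 and AES-GCM-256 keys are accepted; any other
 * cipher type is rejected with -EINVAL.
 */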
struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
					     struct tls_crypto_info *crypto_info)
{
	const void *key;
	u32 sz_bytes;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

		key      = info->key;
		sz_bytes = sizeof(info->key);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;

		key      = info->key;
		sz_bytes = sizeof(info->key);
		break;
	}
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_crypto_dek_create(dek_pool, key, sz_bytes);
}

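/* Release a hardware DEK previously created by mlx5_ktls_create_key(). */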
void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
			   struct mlx5_crypto_dek *dek)
{
	mlx5_crypto_dek_destroy(dek_pool, dek);
}

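/* .tls_dev_add callback: validate the requested cipher against device
 * capabilities, then install a TX or RX kTLS offload context for @sk.
 */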
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
			  u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	if (!mlx5e_ktls_type_check(mdev, crypto_info))
		return -EOPNOTSUPP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
	else
		err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);

	return err;
}

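/* .tls_dev_del callback: tear down the TX or RX kTLS offload context. */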
static void mlx5e_ktls_del(struct net_device *netdev,
			   struct tls_context *tls_ctx,
			   enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		mlx5e_ktls_del_tx(netdev, tls_ctx);
	else
		mlx5e_ktls_del_rx(netdev, tls_ctx);
}

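/* .tls_dev_resync callback: resynchronize the device record state for @sk.
 * Only the RX direction is supported here.
 */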
static int mlx5e_ktls_resync(struct net_device *netdev,
			     struct sock *sk, u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	if (unlikely(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EOPNOTSUPP;

	mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn);
	return 0;
}

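/* kTLS callbacks exposed to the TLS core via netdev->tlsdev_ops. */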
static const struct tlsdev_ops mlx5e_ktls_ops = {
	.tls_dev_add = mlx5e_ktls_add,
	.tls_dev_del = mlx5e_ktls_del,
	.tls_dev_resync = mlx5e_ktls_resync,
};

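/* kTLS RX offload is usable only outside kdump, when the device reports the
 * tls_rx capability, when mlx5_get_sd() is NULL, and when the SQ is large
 * enough to hold the required ICOSQ WQEs.
 */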
bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
	u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);

	if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx) || mlx5_get_sd(mdev))
		return false;

	/* Check the possibility to post the required ICOSQ WQEs. */
	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
		return false;
	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
		return false;
	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
		return false;

	return true;
}

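/* Advertise NETIF_F_HW_TLS_TX/RX according to device kTLS support and hook
 * up the tlsdev_ops.
 */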
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
		return;

	if (mlx5e_is_ktls_tx(mdev)) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features    |= NETIF_F_HW_TLS_TX;
	}

	if (mlx5e_is_ktls_rx(mdev))
		netdev->hw_features |= NETIF_F_HW_TLS_RX;

	netdev->tlsdev_ops = &mlx5e_ktls_ops;
}

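/* Create or destroy the accel TCP flow steering tables when the
 * NETIF_F_HW_TLS_RX feature is toggled.
 */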
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);
	if (enable)
		err = mlx5e_accel_fs_tcp_create(priv->fs);
	else
		mlx5e_accel_fs_tcp_destroy(priv->fs);
	mutex_unlock(&priv->state_lock);

	return err;
}

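/* Allocate the kTLS RX workqueue and, if the RX feature is enabled, the
 * accel TCP flow steering tables.
 */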
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5e_is_ktls_rx(priv->mdev))
		return 0;

	priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
	if (!priv->tls->rx_wq)
		return -ENOMEM;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
		err = mlx5e_accel_fs_tcp_create(priv->fs);
		if (err) {
			destroy_workqueue(priv->tls->rx_wq);
			return err;
		}
	}

	return 0;
}

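/* Mirror of mlx5e_ktls_init_rx(): destroy the flow steering tables (if they
 * were created) and the RX workqueue.
 */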
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_rx(priv->mdev))
		return;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX)
		mlx5e_accel_fs_tcp_destroy(priv->fs);

	destroy_workqueue(priv->tls->rx_wq);
}

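/* Create the "tls" debugfs directory under the driver's debugfs root, if any. */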
static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
				   struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tls->debugfs.dfs = debugfs_create_dir("tls", dfs_root);
}

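/* Allocate the per-netdev TLS context and set up its debugfs entries. */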
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls;

	if (!mlx5e_is_ktls_device(priv->mdev))
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;
	tls->mdev = priv->mdev;

	priv->tls = tls;

	mlx5e_tls_debugfs_init(tls, priv->dfs_root);

	return 0;
}

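/* Remove the debugfs entries and free the per-netdev TLS context. */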
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;

	if (!mlx5e_is_ktls_device(priv->mdev))
		return;

	debugfs_remove_recursive(tls->debugfs.dfs);
	tls->debugfs.dfs = NULL;

	kfree(priv->tls);
	priv->tls = NULL;
}