// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/tls.h>

#include "../ccm.h"
#include "../nfp_net.h"
#include "crypto.h"
#include "fw.h"

#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

#define NFP_NET_TLS_OPCODE_MASK_RX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

#define NFP_NET_TLS_OPCODE_MASK_TX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)

static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
{
	u32 off, val;

	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);

	val = nn_readl(nn, off);
	if (on)
		val |= BIT(opcode & 31);
	else
		val &= ~BIT(opcode & 31);
	nn_writel(nn, off, val);
}

static bool
__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			       enum tls_offload_ctx_dir direction)
{
	u8 opcode;
	int cnt;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		nn->ktls_tx_conn_cnt += add;
		cnt = nn->ktls_tx_conn_cnt;
		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
	} else {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		nn->ktls_rx_conn_cnt += add;
		cnt = nn->ktls_rx_conn_cnt;
	}

	/* Care only about 0 -> 1 and 1 -> 0 transitions */
	if (cnt > 1)
		return false;

	nfp_net_crypto_set_op(nn, opcode, cnt);
	return true;
}

static int
nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			     enum tls_offload_ctx_dir direction)
{
	int ret = 0;

	/* Use the BAR lock to protect the connection counts */
	nn_ctrl_bar_lock(nn);
	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
		/* Undo the cnt adjustment if failed */
		if (ret)
			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
	}
	nn_ctrl_bar_unlock(nn);

	return ret;
}

static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}

static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}

static struct sk_buff *
nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
{
	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
				      sizeof(struct nfp_crypto_reply_simple),
				      flags);
}

static int
nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
			       const char *name, enum nfp_ccm_type type)
{
	struct nfp_crypto_reply_simple *reply;
	int err;

	err = __nfp_ccm_mbox_communicate(nn, skb, type,
					 sizeof(*reply), sizeof(*reply),
					 type == NFP_CCM_TYPE_CRYPTO_DEL);
	if (err) {
		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
		return err;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err)
		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
			   name, err);
	dev_consume_skb_any(skb);

	return err;
}

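/* Fire-and-forget removal of a session from the FW table. By the time
 * this runs the kernel-side connection state is already gone, so there
 * is nothing to unwind on failure: an allocation failure simply bails
 * out, and FW errors are only logged by
 * nfp_net_tls_communicate_simple().
 */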
static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
{
	struct nfp_crypto_req_del *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;

	req = (void *)skb->data;
	req->ep_id = 0;
	memcpy(req->handle, fw_handle, sizeof(req->handle));

	nfp_net_tls_communicate_simple(nn, skb, "delete",
				       NFP_CCM_TYPE_CRYPTO_DEL);
}

static void
nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
{
	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
					FIELD_PREP(NFP_NET_TLS_VLAN,
						   NFP_NET_TLS_VLAN_UNUSED));
}

static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
		     int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	req->front.key_len += sizeof(__be32) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		req->src_ip = inet->inet_saddr;
		req->dst_ip = inet->inet_daddr;
	} else {
		req->src_ip = inet->inet_daddr;
		req->dst_ip = inet->inet_saddr;
	}

	return &req->back;
}

static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
		     int direction)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = inet6_sk(sk);

	req->front.key_len += sizeof(struct in6_addr) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
		memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
	} else {
		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
	}

#endif
	return &req->back;
}

static void
nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
		   struct nfp_crypto_req_add_back *back, struct sock *sk,
		   int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	front->l4_proto = IPPROTO_TCP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		back->src_port = inet->inet_sport;
		back->dst_port = inet->inet_dport;
	} else {
		back->src_port = inet->inet_dport;
		back->dst_port = inet->inet_sport;
	}
}

static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
{
	switch (direction) {
	case TLS_OFFLOAD_CTX_DIR_TX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
	case TLS_OFFLOAD_CTX_DIR_RX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static bool
nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
			 enum tls_offload_ctx_dir direction)
{
	u8 bit;

	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		else
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		break;
	default:
		return false;
	}

	return nn->tlv_caps.crypto_ops & BIT(bit);
}

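/* .tls_dev_add callback: program one kTLS connection into the NIC.
 * Builds an ADD mailbox message describing the 4-tuple, start TCP
 * sequence number and AES-GCM-128 key material, then checks the FW
 * reply for a non-NULL session handle. The connection count is bumped
 * first so the 0 -> 1 transition enables the crypto op in the BAR, and
 * is rolled back on any failure.
 */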
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	bool ipv6;
	int err;

	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
#endif
		/* fall through */
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = 8;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	if (ipv6)
		back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
	else
		back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err) {
		if (err == -ENOSPC) {
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	tls_offload_rx_resync_set_type(sk,
				       TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}

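/* .tls_dev_del callback: drop the device state for a connection. The
 * FW handle stashed in the driver context by nfp_net_tls_add() is all
 * that is needed to identify the session to the FW.
 */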
static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}

static void
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	struct sk_buff *skb;
	gfp_t flags;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return;

	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_communicate_simple(nn, skb, "sync",
					       NFP_CCM_TYPE_CRYPTO_UPDATE);
		ntls->next_seq = seq;
	} else {
		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
				  sizeof(struct nfp_crypto_reply_simple));
	}
}

static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};

static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}

int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}