// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tls.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

static void __set_tcb_field_direct(struct chtls_sock *csk,
				   struct cpl_set_tcb_field *req, u16 word,
				   u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct ulptx_idata *sc;

	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
	req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
				QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
	sc->len = htonl(0);
}

static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
			    u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	unsigned int wrlen;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
	csk = rcu_dereference_sk_user_data(sk);

	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	__set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
}

/*
 * Send a control message to the HW; the message goes as immediate data and
 * the packet is freed immediately.
 */
static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
{
	struct cpl_set_tcb_field *req;
	unsigned int credits_needed;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);

	skb = alloc_skb(wrlen, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	credits_needed = DIV_ROUND_UP(wrlen, 16);
	csk = rcu_dereference_sk_user_data(sk);

	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	csk->wr_credits -= credits_needed;
	csk->wr_unacked += credits_needed;
	enqueue_wr(csk, skb);
	ret = cxgb4_ofld_send(csk->egress_dev, skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}
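
/*
 * Credit accounting note: the cxgb4 TX path charges one credit per 16 bytes
 * of work request, hence credits_needed = DIV_ROUND_UP(wrlen, 16) above.
 * Purely as an illustration (the struct sizes here are assumptions, not
 * taken from the headers): if sizeof(*req) were 40 bytes and sizeof(*sc)
 * were 8, then wrlen = roundup(48, 16) = 48 and the request would consume
 * DIV_ROUND_UP(48, 16) = 3 credits until acknowledged.
 */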

void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
				 u64 mask, u64 val, u8 cookie,
				 int through_l2t)
{
	struct sk_buff *skb;
	unsigned int wrlen;

	wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
	wrlen = roundup(wrlen, 16);

	/* __GFP_NOFAIL allocations cannot fail, so no NULL check is needed */
	skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);

	__set_tcb_field(sk, skb, word, mask, val, cookie, 0);
	send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
}

static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
	return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
}

static int chtls_set_tcb_seqno(struct sock *sk)
{
	return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
}

static int chtls_set_tcb_quiesce(struct sock *sk, int val)
{
	return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
				   TF_RX_QUIESCE_V(val));
}

void chtls_set_quiesce_ctrl(struct sock *sk, int val)
{
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
	wrlen = roundup(wrlen, 16);

	skb = alloc_skb(wrlen, GFP_ATOMIC);
	if (!skb)
		return;

	csk = rcu_dereference_sk_user_data(sk);

	__set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
	ret = cxgb4_ofld_send(csk->egress_dev, skb);
	if (ret < 0)
		kfree_skb(skb);
}

/* TLS key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
	unsigned int num_key_ctx, bsize;
	int ksize;

	num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
	bsize = BITS_TO_LONGS(num_key_ctx);

	cdev->kmap.size = num_key_ctx;
	cdev->kmap.available = bsize;
	ksize = sizeof(*cdev->kmap.addr) * bsize;
	cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
	if (!cdev->kmap.addr)
		return -ENOMEM;

	cdev->kmap.start = lldi->vr->key.start;
	spin_lock_init(&cdev->kmap.lock);
	return 0;
}

static int get_new_keyid(struct chtls_sock *csk, u32 optname)
{
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;
	int keyid;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
	if (keyid < cdev->kmap.size) {
		__set_bit(keyid, cdev->kmap.addr);
		if (optname == TLS_RX)
			hws->rxkey = keyid;
		else
			hws->txkey = keyid;
		atomic_inc(&adap->chcr_stats.tls_key);
	} else {
		keyid = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
	return keyid;
}

void free_tls_keyid(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;

	if (!cdev->kmap.addr)
		return;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	if (hws->rxkey >= 0) {
		__clear_bit(hws->rxkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->rxkey = -1;
	}
	if (hws->txkey >= 0) {
		__clear_bit(hws->txkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->txkey = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
}
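
/*
 * Sizing illustration for chtls_init_kmap() above, with assumed numbers
 * (not taken from the hardware): a 32 KB on-chip key region with 32-byte
 * key contexts gives num_key_ctx = 32768 / 32 = 1024 slots, tracked by a
 * bitmap of BITS_TO_LONGS(1024) = 16 unsigned longs on a 64-bit machine.
 * get_new_keyid() and free_tls_keyid() allocate and release slots from
 * this bitmap under kmap.lock.
 */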

static unsigned int keyid_to_addr(int start_addr, int keyid)
{
	return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
}

static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
{
	kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
				       KEYCTX_TX_WR_AAD_V(1ULL) |
				       KEYCTX_TX_WR_AADST_V(5ULL) |
				       KEYCTX_TX_WR_CIPHER_V(14ULL) |
				       KEYCTX_TX_WR_CIPHERST_V(0ULL) |
				       KEYCTX_TX_WR_AUTH_V(14ULL) |
				       KEYCTX_TX_WR_AUTHST_V(16ULL) |
				       KEYCTX_TX_WR_AUTHIN_V(16ULL));
}

static int chtls_key_info(struct chtls_sock *csk,
			  struct _key_ctx *kctx,
			  u32 keylen, u32 optname,
			  int cipher_type)
{
	unsigned char key[AES_MAX_KEY_SIZE];
	unsigned char *key_p, *salt;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
	struct crypto_aes_ctx aes;
	int ret;

	key_ctx_size = sizeof(struct _key_ctx) +
		       roundup(keylen, 16) + AEAD_H_SIZE;

	/* The GCM mode of AES supports 128- and 256-bit keys, so prepare
	 * the key context based on the GCM cipher type.
	 */
	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
			(struct tls12_crypto_info_aes_gcm_128 *)
			&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_128->key, keylen);

		key_p = gcm_ctx_128->key;
		salt = gcm_ctx_128->salt;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
			(struct tls12_crypto_info_aes_gcm_256 *)
			&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_256->key, keylen);

		key_p = gcm_ctx_256->key;
		salt = gcm_ctx_256->salt;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		break;
	}
	default:
		pr_err("GCM: unsupported cipher type %d (keylen %u)\n",
		       cipher_type, keylen);
		return -EINVAL;
	}
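
	/* H below is the GHASH hash subkey of GCM, i.e. the AES encryption
	 * of a single all-zero block.  The hardware expects it to follow
	 * the raw key material in the key context, so it is precomputed
	 * here and copied in after the key.
	 */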
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));
	csk->tlshws.keylen = key_ctx_size;

	/* Build the key context */
	if (optname == TLS_RX) {
		int key_ctx;

		key_ctx = ((key_ctx_size >> 4) << 3);
		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx);
		chtls_rxkey_ivauth(kctx);
	} else {
		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx_size >> 4);
	}

	memcpy(kctx->salt, salt, salt_size);
	memcpy(kctx->key, key_p, keylen);
	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
	/* Erase the plaintext key from the driver, including the stack
	 * copy used for the H computation.
	 */
	memset(key_p, 0, keylen);
	memzero_explicit(key, sizeof(key));

	return 0;
}

static void chtls_set_scmd(struct chtls_sock *csk)
{
	struct chtls_hws *hws = &csk->tlshws;

	hws->scmd.seqno_numivs =
		SCMD_SEQ_NO_CTRL_V(3) |
		SCMD_PROTO_VERSION_V(0) |
		SCMD_ENC_DEC_CTRL_V(0) |
		SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
		SCMD_CIPH_MODE_V(2) |
		SCMD_AUTH_MODE_V(4) |
		SCMD_HMAC_CTRL_V(0) |
		SCMD_IV_SIZE_V(4) |
		SCMD_NUM_IVS_V(1);

	hws->scmd.ivgen_hdrlen =
		SCMD_IV_GEN_CTRL_V(1) |
		SCMD_KEY_CTX_INLINE_V(0) |
		SCMD_TLS_FRAG_ENABLE_V(1);
}

int chtls_setkey(struct chtls_sock *csk, u32 keylen,
		 u32 optname, int cipher_type)
{
	struct tls_key_req *kwr;
	struct chtls_dev *cdev;
	struct _key_ctx *kctx;
	int wrlen, klen, len;
	struct sk_buff *skb;
	struct sock *sk;
	int keyid;
	int kaddr;
	int ret;

	cdev = csk->cdev;
	sk = csk->sk;

	klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
	wrlen = roundup(sizeof(*kwr), 16);
	len = klen + wrlen;

	/* Flush outstanding data before the new key takes effect */
	if (optname == TLS_TX) {
		lock_sock(sk);
		if (skb_queue_len(&csk->txq))
			chtls_push_frames(csk, 0);
		release_sock(sk);
	}

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	keyid = get_new_keyid(csk, optname);
	if (keyid < 0) {
		ret = -ENOSPC;
		goto out_nokey;
	}

	kaddr = keyid_to_addr(cdev->kmap.start, keyid);
	kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
	kwr->wr.op_to_compl =
		cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
			    FW_WR_ATOMIC_V(1U));
	kwr->wr.flowid_len16 =
		cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)) |
			    FW_WR_FLOWID_V(csk->tid));
	kwr->wr.protocol = 0;
	kwr->wr.mfs = htons(TLS_MFS);
	kwr->wr.reneg_to_write_rx = optname;

	/* ulptx command */
	kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				   T5_ULP_MEMIO_ORDER_V(1) |
				   T5_ULP_MEMIO_IMM_V(1));
	kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
				     DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
	kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

	/* sub command */
	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
	kwr->sc_imm.len = cpu_to_be32(klen);

	lock_sock(sk);
	/* key info */
	kctx = (struct _key_ctx *)(kwr + 1);
	ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
	if (ret)
		goto out_notcb;

	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN))) {
		/* Connection is being torn down; don't report success */
		ret = -EPIPE;
		goto out_notcb;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
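	/* One TX credit covers 16 bytes of work request, so charge
	 * DIV_ROUND_UP(len, 16) credits; the WR stays on the pending
	 * queue (enqueue_wr()) until the firmware acknowledges it.
	 */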
	csk->wr_credits -= DIV_ROUND_UP(len, 16);
	csk->wr_unacked += DIV_ROUND_UP(len, 16);
	enqueue_wr(csk, skb);
	cxgb4_ofld_send(csk->egress_dev, skb);
	skb = NULL;

	chtls_set_scmd(csk);
	/* For an Rx key: point the TCB at the new key, enable the TLS
	 * offload flags, reset the sequence number, then clear quiesce
	 * so the connection resumes with the key in place.
	 */
	if (optname == TLS_RX) {
		ret = chtls_set_tcb_keyid(sk, keyid);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_field(sk, 0,
					  TCB_ULP_RAW_V(TCB_ULP_RAW_M),
					  TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
							 TF_TLS_CONTROL_V(1) |
							 TF_TLS_ACTIVE_V(1) |
							 TF_TLS_ENABLE_V(1))));
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_seqno(sk);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_quiesce(sk, 0);
		if (ret)
			goto out_notcb;
		csk->tlshws.rxkey = keyid;
	} else {
		csk->tlshws.tx_seq_no = 0;
		csk->tlshws.txkey = keyid;
	}

	release_sock(sk);
	return ret;
out_notcb:
	release_sock(sk);
	free_tls_keyid(sk);
out_nokey:
	kfree_skb(skb);
	return ret;
}