1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <crypto/algapi.h> 7 #include <crypto/internal/aead.h> 8 #include <crypto/authenc.h> 9 #include <crypto/des.h> 10 #include <linux/rtnetlink.h> 11 #include "cc_driver.h" 12 #include "cc_buffer_mgr.h" 13 #include "cc_aead.h" 14 #include "cc_request_mgr.h" 15 #include "cc_hash.h" 16 #include "cc_sram_mgr.h" 17 18 #define template_aead template_u.aead 19 20 #define MAX_AEAD_SETKEY_SEQ 12 21 #define MAX_AEAD_PROCESS_SEQ 23 22 23 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE) 24 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE) 25 26 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE 27 28 struct cc_aead_handle { 29 cc_sram_addr_t sram_workspace_addr; 30 struct list_head aead_list; 31 }; 32 33 struct cc_hmac_s { 34 u8 *padded_authkey; 35 u8 *ipad_opad; /* IPAD, OPAD*/ 36 dma_addr_t padded_authkey_dma_addr; 37 dma_addr_t ipad_opad_dma_addr; 38 }; 39 40 struct cc_xcbc_s { 41 u8 *xcbc_keys; /* K1,K2,K3 */ 42 dma_addr_t xcbc_keys_dma_addr; 43 }; 44 45 struct cc_aead_ctx { 46 struct cc_drvdata *drvdata; 47 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */ 48 u8 *enckey; 49 dma_addr_t enckey_dma_addr; 50 union { 51 struct cc_hmac_s hmac; 52 struct cc_xcbc_s xcbc; 53 } auth_state; 54 unsigned int enc_keylen; 55 unsigned int auth_keylen; 56 unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */ 57 unsigned int hash_len; 58 enum drv_cipher_mode cipher_mode; 59 enum cc_flow_mode flow_mode; 60 enum drv_hash_mode auth_mode; 61 }; 62 63 static inline bool valid_assoclen(struct aead_request *req) 64 { 65 return ((req->assoclen == 16) || (req->assoclen == 20)); 66 } 67 68 static void cc_aead_exit(struct crypto_aead *tfm) 69 { 70 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 71 struct device *dev = drvdata_to_dev(ctx->drvdata); 72 73 dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm), 74 crypto_tfm_alg_name(&tfm->base)); 75 76 /* Unmap enckey buffer */ 77 if (ctx->enckey) { 78 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, 79 ctx->enckey_dma_addr); 80 dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n", 81 &ctx->enckey_dma_addr); 82 ctx->enckey_dma_addr = 0; 83 ctx->enckey = NULL; 84 } 85 86 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ 87 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; 88 89 if (xcbc->xcbc_keys) { 90 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3, 91 xcbc->xcbc_keys, 92 xcbc->xcbc_keys_dma_addr); 93 } 94 dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n", 95 &xcbc->xcbc_keys_dma_addr); 96 xcbc->xcbc_keys_dma_addr = 0; 97 xcbc->xcbc_keys = NULL; 98 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */ 99 struct cc_hmac_s *hmac = &ctx->auth_state.hmac; 100 101 if (hmac->ipad_opad) { 102 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE, 103 hmac->ipad_opad, 104 hmac->ipad_opad_dma_addr); 105 dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n", 106 &hmac->ipad_opad_dma_addr); 107 hmac->ipad_opad_dma_addr = 0; 108 hmac->ipad_opad = NULL; 109 } 110 if (hmac->padded_authkey) { 111 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE, 112 hmac->padded_authkey, 113 hmac->padded_authkey_dma_addr); 114 dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n", 115 &hmac->padded_authkey_dma_addr); 116 hmac->padded_authkey_dma_addr = 0; 117 hmac->padded_authkey = NULL; 118 } 119 } 120 } 121 122 static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm) 123 { 124 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 125 126 return cc_get_default_hash_len(ctx->drvdata); 127 } 128 129 static int cc_aead_init(struct crypto_aead *tfm) 130 { 131 struct aead_alg *alg = crypto_aead_alg(tfm); 132 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 133 struct cc_crypto_alg *cc_alg = 134 container_of(alg, struct cc_crypto_alg, aead_alg); 135 struct device *dev = drvdata_to_dev(cc_alg->drvdata); 136 137 dev_dbg(dev, "Initializing context @%p for %s\n", ctx, 138 crypto_tfm_alg_name(&tfm->base)); 139 140 /* Initialize modes in instance */ 141 ctx->cipher_mode = cc_alg->cipher_mode; 142 ctx->flow_mode = cc_alg->flow_mode; 143 ctx->auth_mode = cc_alg->auth_mode; 144 ctx->drvdata = cc_alg->drvdata; 145 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx)); 146 147 /* Allocate key buffer, cache line aligned */ 148 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE, 149 &ctx->enckey_dma_addr, GFP_KERNEL); 150 if (!ctx->enckey) { 151 dev_err(dev, "Failed allocating key buffer\n"); 152 goto init_failed; 153 } 154 dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n", 155 ctx->enckey); 156 157 /* Set default authlen value */ 158 159 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ 160 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; 161 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3; 162 163 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */ 164 /* (and temporary for user key - up to 256b) */ 165 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size, 166 &xcbc->xcbc_keys_dma_addr, 167 GFP_KERNEL); 168 if (!xcbc->xcbc_keys) { 169 dev_err(dev, "Failed allocating buffer for XCBC keys\n"); 170 goto init_failed; 171 } 172 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */ 173 struct cc_hmac_s *hmac = &ctx->auth_state.hmac; 174 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE; 175 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr; 176 177 /* Allocate dma-coherent buffer for IPAD + OPAD */ 178 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size, 179 &hmac->ipad_opad_dma_addr, 180 GFP_KERNEL); 181 182 if (!hmac->ipad_opad) { 183 dev_err(dev, "Failed allocating IPAD/OPAD buffer\n"); 184 goto init_failed; 185 } 186 187 dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n", 188 hmac->ipad_opad); 189 190 hmac->padded_authkey = dma_alloc_coherent(dev, 191 MAX_HMAC_BLOCK_SIZE, 192 pkey_dma, 193 GFP_KERNEL); 194 195 if (!hmac->padded_authkey) { 196 dev_err(dev, "failed to allocate padded_authkey\n"); 197 goto init_failed; 198 } 199 } else { 200 ctx->auth_state.hmac.ipad_opad = NULL; 201 ctx->auth_state.hmac.padded_authkey = NULL; 202 } 203 ctx->hash_len = cc_get_aead_hash_len(tfm); 204 205 return 0; 206 207 init_failed: 208 cc_aead_exit(tfm); 209 return -ENOMEM; 210 } 211 212 static void cc_aead_complete(struct device *dev, void *cc_req, int err) 213 { 214 struct aead_request *areq = (struct aead_request *)cc_req; 215 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); 216 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); 217 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 218 219 /* BACKLOG notification */ 220 if (err == -EINPROGRESS) 221 goto done; 222 223 cc_unmap_aead_request(dev, areq); 224 225 /* Restore ordinary iv pointer */ 226 areq->iv = areq_ctx->backup_iv; 227 228 if (err) 229 goto done; 230 231 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { 232 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr, 233 ctx->authsize) != 0) { 234 dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n", 235 ctx->authsize, ctx->cipher_mode); 236 /* In case of payload authentication failure, MUST NOT 237 * revealed the decrypted message --> zero its memory. 238 */ 239 cc_zero_sgl(areq->dst, areq_ctx->cryptlen); 240 err = -EBADMSG; 241 } 242 /*ENCRYPT*/ 243 } else if (areq_ctx->is_icv_fragmented) { 244 u32 skip = areq->cryptlen + areq_ctx->dst_offset; 245 246 cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl, 247 skip, (skip + ctx->authsize), 248 CC_SG_FROM_BUF); 249 } 250 done: 251 aead_request_complete(areq, err); 252 } 253 254 static unsigned int xcbc_setkey(struct cc_hw_desc *desc, 255 struct cc_aead_ctx *ctx) 256 { 257 /* Load the AES key */ 258 hw_desc_init(&desc[0]); 259 /* We are using for the source/user key the same buffer 260 * as for the output keys, * because after this key loading it 261 * is not needed anymore 262 */ 263 set_din_type(&desc[0], DMA_DLLI, 264 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, 265 NS_BIT); 266 set_cipher_mode(&desc[0], DRV_CIPHER_ECB); 267 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT); 268 set_key_size_aes(&desc[0], ctx->auth_keylen); 269 set_flow_mode(&desc[0], S_DIN_to_AES); 270 set_setup_mode(&desc[0], SETUP_LOAD_KEY0); 271 272 hw_desc_init(&desc[1]); 273 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE); 274 set_flow_mode(&desc[1], DIN_AES_DOUT); 275 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, 276 AES_KEYSIZE_128, NS_BIT, 0); 277 278 hw_desc_init(&desc[2]); 279 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE); 280 set_flow_mode(&desc[2], DIN_AES_DOUT); 281 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr 282 + AES_KEYSIZE_128), 283 AES_KEYSIZE_128, NS_BIT, 0); 284 285 hw_desc_init(&desc[3]); 286 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE); 287 set_flow_mode(&desc[3], DIN_AES_DOUT); 288 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr 289 + 2 * AES_KEYSIZE_128), 290 AES_KEYSIZE_128, NS_BIT, 0); 291 292 return 4; 293 } 294 295 static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) 296 { 297 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; 298 unsigned int digest_ofs = 0; 299 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 300 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; 301 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 302 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; 303 struct cc_hmac_s *hmac = &ctx->auth_state.hmac; 304 305 unsigned int idx = 0; 306 int i; 307 308 /* calc derived HMAC key */ 309 for (i = 0; i < 2; i++) { 310 /* Load hash initial state */ 311 hw_desc_init(&desc[idx]); 312 set_cipher_mode(&desc[idx], hash_mode); 313 set_din_sram(&desc[idx], 314 cc_larval_digest_addr(ctx->drvdata, 315 ctx->auth_mode), 316 digest_size); 317 set_flow_mode(&desc[idx], S_DIN_to_HASH); 318 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 319 idx++; 320 321 /* Load the hash current length*/ 322 hw_desc_init(&desc[idx]); 323 set_cipher_mode(&desc[idx], hash_mode); 324 set_din_const(&desc[idx], 0, ctx->hash_len); 325 set_flow_mode(&desc[idx], S_DIN_to_HASH); 326 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 327 idx++; 328 329 /* Prepare ipad key */ 330 hw_desc_init(&desc[idx]); 331 set_xor_val(&desc[idx], hmac_pad_const[i]); 332 set_cipher_mode(&desc[idx], hash_mode); 333 set_flow_mode(&desc[idx], S_DIN_to_HASH); 334 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 335 idx++; 336 337 /* Perform HASH update */ 338 hw_desc_init(&desc[idx]); 339 set_din_type(&desc[idx], DMA_DLLI, 340 hmac->padded_authkey_dma_addr, 341 SHA256_BLOCK_SIZE, NS_BIT); 342 set_cipher_mode(&desc[idx], hash_mode); 343 set_xor_active(&desc[idx]); 344 set_flow_mode(&desc[idx], DIN_HASH); 345 idx++; 346 347 /* Get the digset */ 348 hw_desc_init(&desc[idx]); 349 set_cipher_mode(&desc[idx], hash_mode); 350 set_dout_dlli(&desc[idx], 351 (hmac->ipad_opad_dma_addr + digest_ofs), 352 digest_size, NS_BIT, 0); 353 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 354 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 355 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); 356 idx++; 357 358 digest_ofs += digest_size; 359 } 360 361 return idx; 362 } 363 364 static int validate_keys_sizes(struct cc_aead_ctx *ctx) 365 { 366 struct device *dev = drvdata_to_dev(ctx->drvdata); 367 368 dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n", 369 ctx->enc_keylen, ctx->auth_keylen); 370 371 switch (ctx->auth_mode) { 372 case DRV_HASH_SHA1: 373 case DRV_HASH_SHA256: 374 break; 375 case DRV_HASH_XCBC_MAC: 376 if (ctx->auth_keylen != AES_KEYSIZE_128 && 377 ctx->auth_keylen != AES_KEYSIZE_192 && 378 ctx->auth_keylen != AES_KEYSIZE_256) 379 return -ENOTSUPP; 380 break; 381 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */ 382 if (ctx->auth_keylen > 0) 383 return -EINVAL; 384 break; 385 default: 386 dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); 387 return -EINVAL; 388 } 389 /* Check cipher key size */ 390 if (ctx->flow_mode == S_DIN_to_DES) { 391 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) { 392 dev_err(dev, "Invalid cipher(3DES) key size: %u\n", 393 ctx->enc_keylen); 394 return -EINVAL; 395 } 396 } else { /* Default assumed to be AES ciphers */ 397 if (ctx->enc_keylen != AES_KEYSIZE_128 && 398 ctx->enc_keylen != AES_KEYSIZE_192 && 399 ctx->enc_keylen != AES_KEYSIZE_256) { 400 dev_err(dev, "Invalid cipher(AES) key size: %u\n", 401 ctx->enc_keylen); 402 return -EINVAL; 403 } 404 } 405 406 return 0; /* All tests of keys sizes passed */ 407 } 408 409 /* This function prepers the user key so it can pass to the hmac processing 410 * (copy to intenral buffer or hash in case of key longer than block 411 */ 412 static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, 413 unsigned int keylen) 414 { 415 dma_addr_t key_dma_addr = 0; 416 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 417 struct device *dev = drvdata_to_dev(ctx->drvdata); 418 u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode); 419 struct cc_crypto_req cc_req = {}; 420 unsigned int blocksize; 421 unsigned int digestsize; 422 unsigned int hashmode; 423 unsigned int idx = 0; 424 int rc = 0; 425 u8 *key = NULL; 426 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 427 dma_addr_t padded_authkey_dma_addr = 428 ctx->auth_state.hmac.padded_authkey_dma_addr; 429 430 switch (ctx->auth_mode) { /* auth_key required and >0 */ 431 case DRV_HASH_SHA1: 432 blocksize = SHA1_BLOCK_SIZE; 433 digestsize = SHA1_DIGEST_SIZE; 434 hashmode = DRV_HASH_HW_SHA1; 435 break; 436 case DRV_HASH_SHA256: 437 default: 438 blocksize = SHA256_BLOCK_SIZE; 439 digestsize = SHA256_DIGEST_SIZE; 440 hashmode = DRV_HASH_HW_SHA256; 441 } 442 443 if (keylen != 0) { 444 445 key = kmemdup(authkey, keylen, GFP_KERNEL); 446 if (!key) 447 return -ENOMEM; 448 449 key_dma_addr = dma_map_single(dev, (void *)key, keylen, 450 DMA_TO_DEVICE); 451 if (dma_mapping_error(dev, key_dma_addr)) { 452 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", 453 key, keylen); 454 kzfree(key); 455 return -ENOMEM; 456 } 457 if (keylen > blocksize) { 458 /* Load hash initial state */ 459 hw_desc_init(&desc[idx]); 460 set_cipher_mode(&desc[idx], hashmode); 461 set_din_sram(&desc[idx], larval_addr, digestsize); 462 set_flow_mode(&desc[idx], S_DIN_to_HASH); 463 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 464 idx++; 465 466 /* Load the hash current length*/ 467 hw_desc_init(&desc[idx]); 468 set_cipher_mode(&desc[idx], hashmode); 469 set_din_const(&desc[idx], 0, ctx->hash_len); 470 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); 471 set_flow_mode(&desc[idx], S_DIN_to_HASH); 472 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 473 idx++; 474 475 hw_desc_init(&desc[idx]); 476 set_din_type(&desc[idx], DMA_DLLI, 477 key_dma_addr, keylen, NS_BIT); 478 set_flow_mode(&desc[idx], DIN_HASH); 479 idx++; 480 481 /* Get hashed key */ 482 hw_desc_init(&desc[idx]); 483 set_cipher_mode(&desc[idx], hashmode); 484 set_dout_dlli(&desc[idx], padded_authkey_dma_addr, 485 digestsize, NS_BIT, 0); 486 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 487 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 488 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); 489 set_cipher_config0(&desc[idx], 490 HASH_DIGEST_RESULT_LITTLE_ENDIAN); 491 idx++; 492 493 hw_desc_init(&desc[idx]); 494 set_din_const(&desc[idx], 0, (blocksize - digestsize)); 495 set_flow_mode(&desc[idx], BYPASS); 496 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr + 497 digestsize), (blocksize - digestsize), 498 NS_BIT, 0); 499 idx++; 500 } else { 501 hw_desc_init(&desc[idx]); 502 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, 503 keylen, NS_BIT); 504 set_flow_mode(&desc[idx], BYPASS); 505 set_dout_dlli(&desc[idx], padded_authkey_dma_addr, 506 keylen, NS_BIT, 0); 507 idx++; 508 509 if ((blocksize - keylen) != 0) { 510 hw_desc_init(&desc[idx]); 511 set_din_const(&desc[idx], 0, 512 (blocksize - keylen)); 513 set_flow_mode(&desc[idx], BYPASS); 514 set_dout_dlli(&desc[idx], 515 (padded_authkey_dma_addr + 516 keylen), 517 (blocksize - keylen), NS_BIT, 0); 518 idx++; 519 } 520 } 521 } else { 522 hw_desc_init(&desc[idx]); 523 set_din_const(&desc[idx], 0, (blocksize - keylen)); 524 set_flow_mode(&desc[idx], BYPASS); 525 set_dout_dlli(&desc[idx], padded_authkey_dma_addr, 526 blocksize, NS_BIT, 0); 527 idx++; 528 } 529 530 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); 531 if (rc) 532 dev_err(dev, "send_request() failed (rc=%d)\n", rc); 533 534 if (key_dma_addr) 535 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); 536 537 kzfree(key); 538 539 return rc; 540 } 541 542 static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, 543 unsigned int keylen) 544 { 545 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 546 struct cc_crypto_req cc_req = {}; 547 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 548 unsigned int seq_len = 0; 549 struct device *dev = drvdata_to_dev(ctx->drvdata); 550 const u8 *enckey, *authkey; 551 int rc; 552 553 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 554 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); 555 556 /* STAT_PHASE_0: Init and sanity checks */ 557 558 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 559 struct crypto_authenc_keys keys; 560 561 rc = crypto_authenc_extractkeys(&keys, key, keylen); 562 if (rc) 563 goto badkey; 564 enckey = keys.enckey; 565 authkey = keys.authkey; 566 ctx->enc_keylen = keys.enckeylen; 567 ctx->auth_keylen = keys.authkeylen; 568 569 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 570 /* the nonce is stored in bytes at end of key */ 571 rc = -EINVAL; 572 if (ctx->enc_keylen < 573 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 574 goto badkey; 575 /* Copy nonce from last 4 bytes in CTR key to 576 * first 4 bytes in CTR IV 577 */ 578 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - 579 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); 580 /* Set CTR key size */ 581 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 582 } 583 } else { /* non-authenc - has just one key */ 584 enckey = key; 585 authkey = NULL; 586 ctx->enc_keylen = keylen; 587 ctx->auth_keylen = 0; 588 } 589 590 rc = validate_keys_sizes(ctx); 591 if (rc) 592 goto badkey; 593 594 /* STAT_PHASE_1: Copy key to ctx */ 595 596 /* Get key material */ 597 memcpy(ctx->enckey, enckey, ctx->enc_keylen); 598 if (ctx->enc_keylen == 24) 599 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 600 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 601 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, 602 ctx->auth_keylen); 603 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 604 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); 605 if (rc) 606 goto badkey; 607 } 608 609 /* STAT_PHASE_2: Create sequence */ 610 611 switch (ctx->auth_mode) { 612 case DRV_HASH_SHA1: 613 case DRV_HASH_SHA256: 614 seq_len = hmac_setkey(desc, ctx); 615 break; 616 case DRV_HASH_XCBC_MAC: 617 seq_len = xcbc_setkey(desc, ctx); 618 break; 619 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */ 620 break; /* No auth. key setup */ 621 default: 622 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); 623 rc = -ENOTSUPP; 624 goto badkey; 625 } 626 627 /* STAT_PHASE_3: Submit sequence to HW */ 628 629 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */ 630 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len); 631 if (rc) { 632 dev_err(dev, "send_request() failed (rc=%d)\n", rc); 633 goto setkey_error; 634 } 635 } 636 637 /* Update STAT_PHASE_3 */ 638 return rc; 639 640 badkey: 641 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 642 643 setkey_error: 644 return rc; 645 } 646 647 static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, 648 unsigned int keylen) 649 { 650 struct crypto_authenc_keys keys; 651 u32 flags; 652 int err; 653 654 err = crypto_authenc_extractkeys(&keys, key, keylen); 655 if (unlikely(err)) 656 goto badkey; 657 658 err = -EINVAL; 659 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 660 goto badkey; 661 662 flags = crypto_aead_get_flags(aead); 663 err = __des3_verify_key(&flags, keys.enckey); 664 if (unlikely(err)) { 665 crypto_aead_set_flags(aead, flags); 666 goto out; 667 } 668 669 err = cc_aead_setkey(aead, key, keylen); 670 671 out: 672 memzero_explicit(&keys, sizeof(keys)); 673 return err; 674 675 badkey: 676 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 677 goto out; 678 } 679 680 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, 681 unsigned int keylen) 682 { 683 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 684 685 if (keylen < 3) 686 return -EINVAL; 687 688 keylen -= 3; 689 memcpy(ctx->ctr_nonce, key + keylen, 3); 690 691 return cc_aead_setkey(tfm, key, keylen); 692 } 693 694 static int cc_aead_setauthsize(struct crypto_aead *authenc, 695 unsigned int authsize) 696 { 697 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); 698 struct device *dev = drvdata_to_dev(ctx->drvdata); 699 700 /* Unsupported auth. sizes */ 701 if (authsize == 0 || 702 authsize > crypto_aead_maxauthsize(authenc)) { 703 return -ENOTSUPP; 704 } 705 706 ctx->authsize = authsize; 707 dev_dbg(dev, "authlen=%d\n", ctx->authsize); 708 709 return 0; 710 } 711 712 static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc, 713 unsigned int authsize) 714 { 715 switch (authsize) { 716 case 8: 717 case 12: 718 case 16: 719 break; 720 default: 721 return -EINVAL; 722 } 723 724 return cc_aead_setauthsize(authenc, authsize); 725 } 726 727 static int cc_ccm_setauthsize(struct crypto_aead *authenc, 728 unsigned int authsize) 729 { 730 switch (authsize) { 731 case 4: 732 case 6: 733 case 8: 734 case 10: 735 case 12: 736 case 14: 737 case 16: 738 break; 739 default: 740 return -EINVAL; 741 } 742 743 return cc_aead_setauthsize(authenc, authsize); 744 } 745 746 static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode, 747 struct cc_hw_desc desc[], unsigned int *seq_size) 748 { 749 struct crypto_aead *tfm = crypto_aead_reqtfm(areq); 750 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 751 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); 752 enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type; 753 unsigned int idx = *seq_size; 754 struct device *dev = drvdata_to_dev(ctx->drvdata); 755 756 switch (assoc_dma_type) { 757 case CC_DMA_BUF_DLLI: 758 dev_dbg(dev, "ASSOC buffer type DLLI\n"); 759 hw_desc_init(&desc[idx]); 760 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src), 761 areq_ctx->assoclen, NS_BIT); 762 set_flow_mode(&desc[idx], flow_mode); 763 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && 764 areq_ctx->cryptlen > 0) 765 set_din_not_last_indication(&desc[idx]); 766 break; 767 case CC_DMA_BUF_MLLI: 768 dev_dbg(dev, "ASSOC buffer type MLLI\n"); 769 hw_desc_init(&desc[idx]); 770 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr, 771 areq_ctx->assoc.mlli_nents, NS_BIT); 772 set_flow_mode(&desc[idx], flow_mode); 773 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && 774 areq_ctx->cryptlen > 0) 775 set_din_not_last_indication(&desc[idx]); 776 break; 777 case CC_DMA_BUF_NULL: 778 default: 779 dev_err(dev, "Invalid ASSOC buffer type\n"); 780 } 781 782 *seq_size = (++idx); 783 } 784 785 static void cc_proc_authen_desc(struct aead_request *areq, 786 unsigned int flow_mode, 787 struct cc_hw_desc desc[], 788 unsigned int *seq_size, int direct) 789 { 790 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); 791 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; 792 unsigned int idx = *seq_size; 793 struct crypto_aead *tfm = crypto_aead_reqtfm(areq); 794 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 795 struct device *dev = drvdata_to_dev(ctx->drvdata); 796 797 switch (data_dma_type) { 798 case CC_DMA_BUF_DLLI: 799 { 800 struct scatterlist *cipher = 801 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 802 areq_ctx->dst_sgl : areq_ctx->src_sgl; 803 804 unsigned int offset = 805 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 806 areq_ctx->dst_offset : areq_ctx->src_offset; 807 dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n"); 808 hw_desc_init(&desc[idx]); 809 set_din_type(&desc[idx], DMA_DLLI, 810 (sg_dma_address(cipher) + offset), 811 areq_ctx->cryptlen, NS_BIT); 812 set_flow_mode(&desc[idx], flow_mode); 813 break; 814 } 815 case CC_DMA_BUF_MLLI: 816 { 817 /* DOUBLE-PASS flow (as default) 818 * assoc. + iv + data -compact in one table 819 * if assoclen is ZERO only IV perform 820 */ 821 cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr; 822 u32 mlli_nents = areq_ctx->assoc.mlli_nents; 823 824 if (areq_ctx->is_single_pass) { 825 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 826 mlli_addr = areq_ctx->dst.sram_addr; 827 mlli_nents = areq_ctx->dst.mlli_nents; 828 } else { 829 mlli_addr = areq_ctx->src.sram_addr; 830 mlli_nents = areq_ctx->src.mlli_nents; 831 } 832 } 833 834 dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n"); 835 hw_desc_init(&desc[idx]); 836 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents, 837 NS_BIT); 838 set_flow_mode(&desc[idx], flow_mode); 839 break; 840 } 841 case CC_DMA_BUF_NULL: 842 default: 843 dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n"); 844 } 845 846 *seq_size = (++idx); 847 } 848 849 static void cc_proc_cipher_desc(struct aead_request *areq, 850 unsigned int flow_mode, 851 struct cc_hw_desc desc[], 852 unsigned int *seq_size) 853 { 854 unsigned int idx = *seq_size; 855 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); 856 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; 857 struct crypto_aead *tfm = crypto_aead_reqtfm(areq); 858 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 859 struct device *dev = drvdata_to_dev(ctx->drvdata); 860 861 if (areq_ctx->cryptlen == 0) 862 return; /*null processing*/ 863 864 switch (data_dma_type) { 865 case CC_DMA_BUF_DLLI: 866 dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n"); 867 hw_desc_init(&desc[idx]); 868 set_din_type(&desc[idx], DMA_DLLI, 869 (sg_dma_address(areq_ctx->src_sgl) + 870 areq_ctx->src_offset), areq_ctx->cryptlen, 871 NS_BIT); 872 set_dout_dlli(&desc[idx], 873 (sg_dma_address(areq_ctx->dst_sgl) + 874 areq_ctx->dst_offset), 875 areq_ctx->cryptlen, NS_BIT, 0); 876 set_flow_mode(&desc[idx], flow_mode); 877 break; 878 case CC_DMA_BUF_MLLI: 879 dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n"); 880 hw_desc_init(&desc[idx]); 881 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr, 882 areq_ctx->src.mlli_nents, NS_BIT); 883 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr, 884 areq_ctx->dst.mlli_nents, NS_BIT, 0); 885 set_flow_mode(&desc[idx], flow_mode); 886 break; 887 case CC_DMA_BUF_NULL: 888 default: 889 dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n"); 890 } 891 892 *seq_size = (++idx); 893 } 894 895 static void cc_proc_digest_desc(struct aead_request *req, 896 struct cc_hw_desc desc[], 897 unsigned int *seq_size) 898 { 899 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 900 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 901 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 902 unsigned int idx = *seq_size; 903 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 904 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; 905 int direct = req_ctx->gen_ctx.op_type; 906 907 /* Get final ICV result */ 908 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 909 hw_desc_init(&desc[idx]); 910 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 911 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 912 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize, 913 NS_BIT, 1); 914 set_queue_last_ind(ctx->drvdata, &desc[idx]); 915 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 916 set_aes_not_hash_mode(&desc[idx]); 917 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 918 } else { 919 set_cipher_config0(&desc[idx], 920 HASH_DIGEST_RESULT_LITTLE_ENDIAN); 921 set_cipher_mode(&desc[idx], hash_mode); 922 } 923 } else { /*Decrypt*/ 924 /* Get ICV out from hardware */ 925 hw_desc_init(&desc[idx]); 926 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 927 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 928 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, 929 ctx->authsize, NS_BIT, 1); 930 set_queue_last_ind(ctx->drvdata, &desc[idx]); 931 set_cipher_config0(&desc[idx], 932 HASH_DIGEST_RESULT_LITTLE_ENDIAN); 933 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); 934 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 935 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 936 set_aes_not_hash_mode(&desc[idx]); 937 } else { 938 set_cipher_mode(&desc[idx], hash_mode); 939 } 940 } 941 942 *seq_size = (++idx); 943 } 944 945 static void cc_set_cipher_desc(struct aead_request *req, 946 struct cc_hw_desc desc[], 947 unsigned int *seq_size) 948 { 949 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 950 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 951 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 952 unsigned int hw_iv_size = req_ctx->hw_iv_size; 953 unsigned int idx = *seq_size; 954 int direct = req_ctx->gen_ctx.op_type; 955 956 /* Setup cipher state */ 957 hw_desc_init(&desc[idx]); 958 set_cipher_config0(&desc[idx], direct); 959 set_flow_mode(&desc[idx], ctx->flow_mode); 960 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr, 961 hw_iv_size, NS_BIT); 962 if (ctx->cipher_mode == DRV_CIPHER_CTR) 963 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 964 else 965 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 966 set_cipher_mode(&desc[idx], ctx->cipher_mode); 967 idx++; 968 969 /* Setup enc. key */ 970 hw_desc_init(&desc[idx]); 971 set_cipher_config0(&desc[idx], direct); 972 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 973 set_flow_mode(&desc[idx], ctx->flow_mode); 974 if (ctx->flow_mode == S_DIN_to_AES) { 975 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 976 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX : 977 ctx->enc_keylen), NS_BIT); 978 set_key_size_aes(&desc[idx], ctx->enc_keylen); 979 } else { 980 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 981 ctx->enc_keylen, NS_BIT); 982 set_key_size_des(&desc[idx], ctx->enc_keylen); 983 } 984 set_cipher_mode(&desc[idx], ctx->cipher_mode); 985 idx++; 986 987 *seq_size = idx; 988 } 989 990 static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[], 991 unsigned int *seq_size, unsigned int data_flow_mode) 992 { 993 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 994 int direct = req_ctx->gen_ctx.op_type; 995 unsigned int idx = *seq_size; 996 997 if (req_ctx->cryptlen == 0) 998 return; /*null processing*/ 999 1000 cc_set_cipher_desc(req, desc, &idx); 1001 cc_proc_cipher_desc(req, data_flow_mode, desc, &idx); 1002 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 1003 /* We must wait for DMA to write all cipher */ 1004 hw_desc_init(&desc[idx]); 1005 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1006 set_dout_no_dma(&desc[idx], 0, 0, 1); 1007 idx++; 1008 } 1009 1010 *seq_size = idx; 1011 } 1012 1013 static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[], 1014 unsigned int *seq_size) 1015 { 1016 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1017 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1018 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 1019 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; 1020 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 1021 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; 1022 unsigned int idx = *seq_size; 1023 1024 /* Loading hash ipad xor key state */ 1025 hw_desc_init(&desc[idx]); 1026 set_cipher_mode(&desc[idx], hash_mode); 1027 set_din_type(&desc[idx], DMA_DLLI, 1028 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size, 1029 NS_BIT); 1030 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1031 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 1032 idx++; 1033 1034 /* Load init. digest len (64 bytes) */ 1035 hw_desc_init(&desc[idx]); 1036 set_cipher_mode(&desc[idx], hash_mode); 1037 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), 1038 ctx->hash_len); 1039 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1040 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1041 idx++; 1042 1043 *seq_size = idx; 1044 } 1045 1046 static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[], 1047 unsigned int *seq_size) 1048 { 1049 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1050 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1051 unsigned int idx = *seq_size; 1052 1053 /* Loading MAC state */ 1054 hw_desc_init(&desc[idx]); 1055 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE); 1056 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 1057 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 1058 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1059 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); 1060 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1061 set_aes_not_hash_mode(&desc[idx]); 1062 idx++; 1063 1064 /* Setup XCBC MAC K1 */ 1065 hw_desc_init(&desc[idx]); 1066 set_din_type(&desc[idx], DMA_DLLI, 1067 ctx->auth_state.xcbc.xcbc_keys_dma_addr, 1068 AES_KEYSIZE_128, NS_BIT); 1069 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1070 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 1071 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1072 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); 1073 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1074 set_aes_not_hash_mode(&desc[idx]); 1075 idx++; 1076 1077 /* Setup XCBC MAC K2 */ 1078 hw_desc_init(&desc[idx]); 1079 set_din_type(&desc[idx], DMA_DLLI, 1080 (ctx->auth_state.xcbc.xcbc_keys_dma_addr + 1081 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT); 1082 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 1083 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 1084 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1085 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); 1086 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1087 set_aes_not_hash_mode(&desc[idx]); 1088 idx++; 1089 1090 /* Setup XCBC MAC K3 */ 1091 hw_desc_init(&desc[idx]); 1092 set_din_type(&desc[idx], DMA_DLLI, 1093 (ctx->auth_state.xcbc.xcbc_keys_dma_addr + 1094 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT); 1095 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2); 1096 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); 1097 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1098 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); 1099 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1100 set_aes_not_hash_mode(&desc[idx]); 1101 idx++; 1102 1103 *seq_size = idx; 1104 } 1105 1106 static void cc_proc_header_desc(struct aead_request *req, 1107 struct cc_hw_desc desc[], 1108 unsigned int *seq_size) 1109 { 1110 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1111 unsigned int idx = *seq_size; 1112 1113 /* Hash associated data */ 1114 if (areq_ctx->assoclen > 0) 1115 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1116 1117 /* Hash IV */ 1118 *seq_size = idx; 1119 } 1120 1121 static void cc_proc_scheme_desc(struct aead_request *req, 1122 struct cc_hw_desc desc[], 1123 unsigned int *seq_size) 1124 { 1125 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1126 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1127 struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle; 1128 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 1129 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; 1130 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 1131 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; 1132 unsigned int idx = *seq_size; 1133 1134 hw_desc_init(&desc[idx]); 1135 set_cipher_mode(&desc[idx], hash_mode); 1136 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, 1137 ctx->hash_len); 1138 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 1139 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); 1140 set_cipher_do(&desc[idx], DO_PAD); 1141 idx++; 1142 1143 /* Get final ICV result */ 1144 hw_desc_init(&desc[idx]); 1145 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, 1146 digest_size); 1147 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 1148 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 1149 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); 1150 set_cipher_mode(&desc[idx], hash_mode); 1151 idx++; 1152 1153 /* Loading hash opad xor key state */ 1154 hw_desc_init(&desc[idx]); 1155 set_cipher_mode(&desc[idx], hash_mode); 1156 set_din_type(&desc[idx], DMA_DLLI, 1157 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size), 1158 digest_size, NS_BIT); 1159 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1160 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 1161 idx++; 1162 1163 /* Load init. digest len (64 bytes) */ 1164 hw_desc_init(&desc[idx]); 1165 set_cipher_mode(&desc[idx], hash_mode); 1166 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), 1167 ctx->hash_len); 1168 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); 1169 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1170 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1171 idx++; 1172 1173 /* Perform HASH update */ 1174 hw_desc_init(&desc[idx]); 1175 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr, 1176 digest_size); 1177 set_flow_mode(&desc[idx], DIN_HASH); 1178 idx++; 1179 1180 *seq_size = idx; 1181 } 1182 1183 static void cc_mlli_to_sram(struct aead_request *req, 1184 struct cc_hw_desc desc[], unsigned int *seq_size) 1185 { 1186 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1187 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1188 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1189 struct device *dev = drvdata_to_dev(ctx->drvdata); 1190 1191 if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || 1192 req_ctx->data_buff_type == CC_DMA_BUF_MLLI || 1193 !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) { 1194 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", 1195 (unsigned int)ctx->drvdata->mlli_sram_addr, 1196 req_ctx->mlli_params.mlli_len); 1197 /* Copy MLLI table host-to-sram */ 1198 hw_desc_init(&desc[*seq_size]); 1199 set_din_type(&desc[*seq_size], DMA_DLLI, 1200 req_ctx->mlli_params.mlli_dma_addr, 1201 req_ctx->mlli_params.mlli_len, NS_BIT); 1202 set_dout_sram(&desc[*seq_size], 1203 ctx->drvdata->mlli_sram_addr, 1204 req_ctx->mlli_params.mlli_len); 1205 set_flow_mode(&desc[*seq_size], BYPASS); 1206 (*seq_size)++; 1207 } 1208 } 1209 1210 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct, 1211 enum cc_flow_mode setup_flow_mode, 1212 bool is_single_pass) 1213 { 1214 enum cc_flow_mode data_flow_mode; 1215 1216 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 1217 if (setup_flow_mode == S_DIN_to_AES) 1218 data_flow_mode = is_single_pass ? 1219 AES_to_HASH_and_DOUT : DIN_AES_DOUT; 1220 else 1221 data_flow_mode = is_single_pass ? 1222 DES_to_HASH_and_DOUT : DIN_DES_DOUT; 1223 } else { /* Decrypt */ 1224 if (setup_flow_mode == S_DIN_to_AES) 1225 data_flow_mode = is_single_pass ? 1226 AES_and_HASH : DIN_AES_DOUT; 1227 else 1228 data_flow_mode = is_single_pass ? 1229 DES_and_HASH : DIN_DES_DOUT; 1230 } 1231 1232 return data_flow_mode; 1233 } 1234 1235 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[], 1236 unsigned int *seq_size) 1237 { 1238 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1239 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1240 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1241 int direct = req_ctx->gen_ctx.op_type; 1242 unsigned int data_flow_mode = 1243 cc_get_data_flow(direct, ctx->flow_mode, 1244 req_ctx->is_single_pass); 1245 1246 if (req_ctx->is_single_pass) { 1247 /** 1248 * Single-pass flow 1249 */ 1250 cc_set_hmac_desc(req, desc, seq_size); 1251 cc_set_cipher_desc(req, desc, seq_size); 1252 cc_proc_header_desc(req, desc, seq_size); 1253 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size); 1254 cc_proc_scheme_desc(req, desc, seq_size); 1255 cc_proc_digest_desc(req, desc, seq_size); 1256 return; 1257 } 1258 1259 /** 1260 * Double-pass flow 1261 * Fallback for unsupported single-pass modes, 1262 * i.e. using assoc. data of non-word-multiple 1263 */ 1264 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 1265 /* encrypt first.. */ 1266 cc_proc_cipher(req, desc, seq_size, data_flow_mode); 1267 /* authenc after..*/ 1268 cc_set_hmac_desc(req, desc, seq_size); 1269 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); 1270 cc_proc_scheme_desc(req, desc, seq_size); 1271 cc_proc_digest_desc(req, desc, seq_size); 1272 1273 } else { /*DECRYPT*/ 1274 /* authenc first..*/ 1275 cc_set_hmac_desc(req, desc, seq_size); 1276 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); 1277 cc_proc_scheme_desc(req, desc, seq_size); 1278 /* decrypt after.. */ 1279 cc_proc_cipher(req, desc, seq_size, data_flow_mode); 1280 /* read the digest result with setting the completion bit 1281 * must be after the cipher operation 1282 */ 1283 cc_proc_digest_desc(req, desc, seq_size); 1284 } 1285 } 1286 1287 static void 1288 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[], 1289 unsigned int *seq_size) 1290 { 1291 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1292 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1293 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1294 int direct = req_ctx->gen_ctx.op_type; 1295 unsigned int data_flow_mode = 1296 cc_get_data_flow(direct, ctx->flow_mode, 1297 req_ctx->is_single_pass); 1298 1299 if (req_ctx->is_single_pass) { 1300 /** 1301 * Single-pass flow 1302 */ 1303 cc_set_xcbc_desc(req, desc, seq_size); 1304 cc_set_cipher_desc(req, desc, seq_size); 1305 cc_proc_header_desc(req, desc, seq_size); 1306 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size); 1307 cc_proc_digest_desc(req, desc, seq_size); 1308 return; 1309 } 1310 1311 /** 1312 * Double-pass flow 1313 * Fallback for unsupported single-pass modes, 1314 * i.e. using assoc. data of non-word-multiple 1315 */ 1316 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { 1317 /* encrypt first.. */ 1318 cc_proc_cipher(req, desc, seq_size, data_flow_mode); 1319 /* authenc after.. */ 1320 cc_set_xcbc_desc(req, desc, seq_size); 1321 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); 1322 cc_proc_digest_desc(req, desc, seq_size); 1323 } else { /*DECRYPT*/ 1324 /* authenc first.. */ 1325 cc_set_xcbc_desc(req, desc, seq_size); 1326 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); 1327 /* decrypt after..*/ 1328 cc_proc_cipher(req, desc, seq_size, data_flow_mode); 1329 /* read the digest result with setting the completion bit 1330 * must be after the cipher operation 1331 */ 1332 cc_proc_digest_desc(req, desc, seq_size); 1333 } 1334 } 1335 1336 static int validate_data_size(struct cc_aead_ctx *ctx, 1337 enum drv_crypto_direction direct, 1338 struct aead_request *req) 1339 { 1340 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1341 struct device *dev = drvdata_to_dev(ctx->drvdata); 1342 unsigned int assoclen = areq_ctx->assoclen; 1343 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? 1344 (req->cryptlen - ctx->authsize) : req->cryptlen; 1345 1346 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT && 1347 req->cryptlen < ctx->authsize) 1348 goto data_size_err; 1349 1350 areq_ctx->is_single_pass = true; /*defaulted to fast flow*/ 1351 1352 switch (ctx->flow_mode) { 1353 case S_DIN_to_AES: 1354 if (ctx->cipher_mode == DRV_CIPHER_CBC && 1355 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)) 1356 goto data_size_err; 1357 if (ctx->cipher_mode == DRV_CIPHER_CCM) 1358 break; 1359 if (ctx->cipher_mode == DRV_CIPHER_GCTR) { 1360 if (areq_ctx->plaintext_authenticate_only) 1361 areq_ctx->is_single_pass = false; 1362 break; 1363 } 1364 1365 if (!IS_ALIGNED(assoclen, sizeof(u32))) 1366 areq_ctx->is_single_pass = false; 1367 1368 if (ctx->cipher_mode == DRV_CIPHER_CTR && 1369 !IS_ALIGNED(cipherlen, sizeof(u32))) 1370 areq_ctx->is_single_pass = false; 1371 1372 break; 1373 case S_DIN_to_DES: 1374 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)) 1375 goto data_size_err; 1376 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)) 1377 areq_ctx->is_single_pass = false; 1378 break; 1379 default: 1380 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode); 1381 goto data_size_err; 1382 } 1383 1384 return 0; 1385 1386 data_size_err: 1387 return -EINVAL; 1388 } 1389 1390 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size) 1391 { 1392 unsigned int len = 0; 1393 1394 if (header_size == 0) 1395 return 0; 1396 1397 if (header_size < ((1UL << 16) - (1UL << 8))) { 1398 len = 2; 1399 1400 pa0_buff[0] = (header_size >> 8) & 0xFF; 1401 pa0_buff[1] = header_size & 0xFF; 1402 } else { 1403 len = 6; 1404 1405 pa0_buff[0] = 0xFF; 1406 pa0_buff[1] = 0xFE; 1407 pa0_buff[2] = (header_size >> 24) & 0xFF; 1408 pa0_buff[3] = (header_size >> 16) & 0xFF; 1409 pa0_buff[4] = (header_size >> 8) & 0xFF; 1410 pa0_buff[5] = header_size & 0xFF; 1411 } 1412 1413 return len; 1414 } 1415 1416 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize) 1417 { 1418 __be32 data; 1419 1420 memset(block, 0, csize); 1421 block += csize; 1422 1423 if (csize >= 4) 1424 csize = 4; 1425 else if (msglen > (1 << (8 * csize))) 1426 return -EOVERFLOW; 1427 1428 data = cpu_to_be32(msglen); 1429 memcpy(block - csize, (u8 *)&data + 4 - csize, csize); 1430 1431 return 0; 1432 } 1433 1434 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[], 1435 unsigned int *seq_size) 1436 { 1437 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1438 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1439 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1440 unsigned int idx = *seq_size; 1441 unsigned int cipher_flow_mode; 1442 dma_addr_t mac_result; 1443 1444 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { 1445 cipher_flow_mode = AES_to_HASH_and_DOUT; 1446 mac_result = req_ctx->mac_buf_dma_addr; 1447 } else { /* Encrypt */ 1448 cipher_flow_mode = AES_and_HASH; 1449 mac_result = req_ctx->icv_dma_addr; 1450 } 1451 1452 /* load key */ 1453 hw_desc_init(&desc[idx]); 1454 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); 1455 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 1456 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX : 1457 ctx->enc_keylen), NS_BIT); 1458 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1459 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1460 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1461 set_flow_mode(&desc[idx], S_DIN_to_AES); 1462 idx++; 1463 1464 /* load ctr state */ 1465 hw_desc_init(&desc[idx]); 1466 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); 1467 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1468 set_din_type(&desc[idx], DMA_DLLI, 1469 req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT); 1470 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1471 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 1472 set_flow_mode(&desc[idx], S_DIN_to_AES); 1473 idx++; 1474 1475 /* load MAC key */ 1476 hw_desc_init(&desc[idx]); 1477 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); 1478 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 1479 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX : 1480 ctx->enc_keylen), NS_BIT); 1481 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1482 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1483 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1484 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1485 set_aes_not_hash_mode(&desc[idx]); 1486 idx++; 1487 1488 /* load MAC state */ 1489 hw_desc_init(&desc[idx]); 1490 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); 1491 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1492 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, 1493 AES_BLOCK_SIZE, NS_BIT); 1494 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 1495 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 1496 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1497 set_aes_not_hash_mode(&desc[idx]); 1498 idx++; 1499 1500 /* process assoc data */ 1501 if (req_ctx->assoclen > 0) { 1502 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1503 } else { 1504 hw_desc_init(&desc[idx]); 1505 set_din_type(&desc[idx], DMA_DLLI, 1506 sg_dma_address(&req_ctx->ccm_adata_sg), 1507 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT); 1508 set_flow_mode(&desc[idx], DIN_HASH); 1509 idx++; 1510 } 1511 1512 /* process the cipher */ 1513 if (req_ctx->cryptlen) 1514 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx); 1515 1516 /* Read temporal MAC */ 1517 hw_desc_init(&desc[idx]); 1518 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); 1519 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize, 1520 NS_BIT, 0); 1521 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 1522 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); 1523 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 1524 set_aes_not_hash_mode(&desc[idx]); 1525 idx++; 1526 1527 /* load AES-CTR state (for last MAC calculation)*/ 1528 hw_desc_init(&desc[idx]); 1529 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); 1530 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1531 set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr, 1532 AES_BLOCK_SIZE, NS_BIT); 1533 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1534 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 1535 set_flow_mode(&desc[idx], S_DIN_to_AES); 1536 idx++; 1537 1538 hw_desc_init(&desc[idx]); 1539 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1540 set_dout_no_dma(&desc[idx], 0, 0, 1); 1541 idx++; 1542 1543 /* encrypt the "T" value and store MAC in mac_state */ 1544 hw_desc_init(&desc[idx]); 1545 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, 1546 ctx->authsize, NS_BIT); 1547 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1); 1548 set_queue_last_ind(ctx->drvdata, &desc[idx]); 1549 set_flow_mode(&desc[idx], DIN_AES_DOUT); 1550 idx++; 1551 1552 *seq_size = idx; 1553 return 0; 1554 } 1555 1556 static int config_ccm_adata(struct aead_request *req) 1557 { 1558 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1559 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1560 struct device *dev = drvdata_to_dev(ctx->drvdata); 1561 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1562 //unsigned int size_of_a = 0, rem_a_size = 0; 1563 unsigned int lp = req->iv[0]; 1564 /* Note: The code assume that req->iv[0] already contains the value 1565 * of L' of RFC3610 1566 */ 1567 unsigned int l = lp + 1; /* This is L' of RFC 3610. */ 1568 unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */ 1569 u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET; 1570 u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET; 1571 u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; 1572 unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 1573 DRV_CRYPTO_DIRECTION_ENCRYPT) ? 1574 req->cryptlen : 1575 (req->cryptlen - ctx->authsize); 1576 int rc; 1577 1578 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE); 1579 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3); 1580 1581 /* taken from crypto/ccm.c */ 1582 /* 2 <= L <= 8, so 1 <= L' <= 7. */ 1583 if (l < 2 || l > 8) { 1584 dev_err(dev, "illegal iv value %X\n", req->iv[0]); 1585 return -EINVAL; 1586 } 1587 memcpy(b0, req->iv, AES_BLOCK_SIZE); 1588 1589 /* format control info per RFC 3610 and 1590 * NIST Special Publication 800-38C 1591 */ 1592 *b0 |= (8 * ((m - 2) / 2)); 1593 if (req_ctx->assoclen > 0) 1594 *b0 |= 64; /* Enable bit 6 if Adata exists. */ 1595 1596 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ 1597 if (rc) { 1598 dev_err(dev, "message len overflow detected"); 1599 return rc; 1600 } 1601 /* END of "taken from crypto/ccm.c" */ 1602 1603 /* l(a) - size of associated data. */ 1604 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen); 1605 1606 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1); 1607 req->iv[15] = 1; 1608 1609 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE); 1610 ctr_count_0[15] = 0; 1611 1612 return 0; 1613 } 1614 1615 static void cc_proc_rfc4309_ccm(struct aead_request *req) 1616 { 1617 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1618 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1619 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1620 1621 /* L' */ 1622 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE); 1623 /* For RFC 4309, always use 4 bytes for message length 1624 * (at most 2^32-1 bytes). 1625 */ 1626 areq_ctx->ctr_iv[0] = 3; 1627 1628 /* In RFC 4309 there is an 11-bytes nonce+IV part, 1629 * that we build here. 1630 */ 1631 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, 1632 CCM_BLOCK_NONCE_SIZE); 1633 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, 1634 CCM_BLOCK_IV_SIZE); 1635 req->iv = areq_ctx->ctr_iv; 1636 areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; 1637 } 1638 1639 static void cc_set_ghash_desc(struct aead_request *req, 1640 struct cc_hw_desc desc[], unsigned int *seq_size) 1641 { 1642 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1643 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1644 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1645 unsigned int idx = *seq_size; 1646 1647 /* load key to AES*/ 1648 hw_desc_init(&desc[idx]); 1649 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB); 1650 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1651 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 1652 ctx->enc_keylen, NS_BIT); 1653 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1654 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1655 set_flow_mode(&desc[idx], S_DIN_to_AES); 1656 idx++; 1657 1658 /* process one zero block to generate hkey */ 1659 hw_desc_init(&desc[idx]); 1660 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); 1661 set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE, 1662 NS_BIT, 0); 1663 set_flow_mode(&desc[idx], DIN_AES_DOUT); 1664 idx++; 1665 1666 /* Memory Barrier */ 1667 hw_desc_init(&desc[idx]); 1668 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1669 set_dout_no_dma(&desc[idx], 0, 0, 1); 1670 idx++; 1671 1672 /* Load GHASH subkey */ 1673 hw_desc_init(&desc[idx]); 1674 set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr, 1675 AES_BLOCK_SIZE, NS_BIT); 1676 set_dout_no_dma(&desc[idx], 0, 0, 1); 1677 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1678 set_aes_not_hash_mode(&desc[idx]); 1679 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); 1680 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); 1681 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1682 idx++; 1683 1684 /* Configure Hash Engine to work with GHASH. 1685 * Since it was not possible to extend HASH submodes to add GHASH, 1686 * The following command is necessary in order to 1687 * select GHASH (according to HW designers) 1688 */ 1689 hw_desc_init(&desc[idx]); 1690 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1691 set_dout_no_dma(&desc[idx], 0, 0, 1); 1692 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1693 set_aes_not_hash_mode(&desc[idx]); 1694 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); 1695 set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK 1696 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1697 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); 1698 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1699 idx++; 1700 1701 /* Load GHASH initial STATE (which is 0). (for any hash there is an 1702 * initial state) 1703 */ 1704 hw_desc_init(&desc[idx]); 1705 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); 1706 set_dout_no_dma(&desc[idx], 0, 0, 1); 1707 set_flow_mode(&desc[idx], S_DIN_to_HASH); 1708 set_aes_not_hash_mode(&desc[idx]); 1709 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); 1710 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); 1711 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); 1712 idx++; 1713 1714 *seq_size = idx; 1715 } 1716 1717 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[], 1718 unsigned int *seq_size) 1719 { 1720 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1721 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1722 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1723 unsigned int idx = *seq_size; 1724 1725 /* load key to AES*/ 1726 hw_desc_init(&desc[idx]); 1727 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); 1728 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1729 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 1730 ctx->enc_keylen, NS_BIT); 1731 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1732 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); 1733 set_flow_mode(&desc[idx], S_DIN_to_AES); 1734 idx++; 1735 1736 if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) { 1737 /* load AES/CTR initial CTR value inc by 2*/ 1738 hw_desc_init(&desc[idx]); 1739 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); 1740 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1741 set_din_type(&desc[idx], DMA_DLLI, 1742 req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE, 1743 NS_BIT); 1744 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1745 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 1746 set_flow_mode(&desc[idx], S_DIN_to_AES); 1747 idx++; 1748 } 1749 1750 *seq_size = idx; 1751 } 1752 1753 static void cc_proc_gcm_result(struct aead_request *req, 1754 struct cc_hw_desc desc[], 1755 unsigned int *seq_size) 1756 { 1757 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1758 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1759 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1760 dma_addr_t mac_result; 1761 unsigned int idx = *seq_size; 1762 1763 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { 1764 mac_result = req_ctx->mac_buf_dma_addr; 1765 } else { /* Encrypt */ 1766 mac_result = req_ctx->icv_dma_addr; 1767 } 1768 1769 /* process(ghash) gcm_block_len */ 1770 hw_desc_init(&desc[idx]); 1771 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr, 1772 AES_BLOCK_SIZE, NS_BIT); 1773 set_flow_mode(&desc[idx], DIN_HASH); 1774 idx++; 1775 1776 /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */ 1777 hw_desc_init(&desc[idx]); 1778 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); 1779 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1780 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE, 1781 NS_BIT, 0); 1782 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 1783 set_flow_mode(&desc[idx], S_HASH_to_DOUT); 1784 set_aes_not_hash_mode(&desc[idx]); 1785 1786 idx++; 1787 1788 /* load AES/CTR initial CTR value inc by 1*/ 1789 hw_desc_init(&desc[idx]); 1790 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); 1791 set_key_size_aes(&desc[idx], ctx->enc_keylen); 1792 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr, 1793 AES_BLOCK_SIZE, NS_BIT); 1794 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1795 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); 1796 set_flow_mode(&desc[idx], S_DIN_to_AES); 1797 idx++; 1798 1799 /* Memory Barrier */ 1800 hw_desc_init(&desc[idx]); 1801 set_din_no_dma(&desc[idx], 0, 0xfffff0); 1802 set_dout_no_dma(&desc[idx], 0, 0, 1); 1803 idx++; 1804 1805 /* process GCTR on stored GHASH and store MAC in mac_state*/ 1806 hw_desc_init(&desc[idx]); 1807 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); 1808 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, 1809 AES_BLOCK_SIZE, NS_BIT); 1810 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1); 1811 set_queue_last_ind(ctx->drvdata, &desc[idx]); 1812 set_flow_mode(&desc[idx], DIN_AES_DOUT); 1813 idx++; 1814 1815 *seq_size = idx; 1816 } 1817 1818 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], 1819 unsigned int *seq_size) 1820 { 1821 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1822 unsigned int cipher_flow_mode; 1823 1824 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { 1825 cipher_flow_mode = AES_and_HASH; 1826 } else { /* Encrypt */ 1827 cipher_flow_mode = AES_to_HASH_and_DOUT; 1828 } 1829 1830 //in RFC4543 no data to encrypt. just copy data from src to dest. 1831 if (req_ctx->plaintext_authenticate_only) { 1832 cc_proc_cipher_desc(req, BYPASS, desc, seq_size); 1833 cc_set_ghash_desc(req, desc, seq_size); 1834 /* process(ghash) assoc data */ 1835 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); 1836 cc_set_gctr_desc(req, desc, seq_size); 1837 cc_proc_gcm_result(req, desc, seq_size); 1838 return 0; 1839 } 1840 1841 // for gcm and rfc4106. 1842 cc_set_ghash_desc(req, desc, seq_size); 1843 /* process(ghash) assoc data */ 1844 if (req_ctx->assoclen > 0) 1845 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); 1846 cc_set_gctr_desc(req, desc, seq_size); 1847 /* process(gctr+ghash) */ 1848 if (req_ctx->cryptlen) 1849 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size); 1850 cc_proc_gcm_result(req, desc, seq_size); 1851 1852 return 0; 1853 } 1854 1855 static int config_gcm_context(struct aead_request *req) 1856 { 1857 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1858 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1859 struct aead_req_ctx *req_ctx = aead_request_ctx(req); 1860 struct device *dev = drvdata_to_dev(ctx->drvdata); 1861 1862 unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 1863 DRV_CRYPTO_DIRECTION_ENCRYPT) ? 1864 req->cryptlen : 1865 (req->cryptlen - ctx->authsize); 1866 __be32 counter = cpu_to_be32(2); 1867 1868 dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n", 1869 __func__, cryptlen, req_ctx->assoclen, ctx->authsize); 1870 1871 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); 1872 1873 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE); 1874 1875 memcpy(req->iv + 12, &counter, 4); 1876 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16); 1877 1878 counter = cpu_to_be32(1); 1879 memcpy(req->iv + 12, &counter, 4); 1880 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16); 1881 1882 if (!req_ctx->plaintext_authenticate_only) { 1883 __be64 temp64; 1884 1885 temp64 = cpu_to_be64(req_ctx->assoclen * 8); 1886 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1887 temp64 = cpu_to_be64(cryptlen * 8); 1888 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); 1889 } else { 1890 /* rfc4543=> all data(AAD,IV,Plain) are considered additional 1891 * data that is nothing is encrypted. 1892 */ 1893 __be64 temp64; 1894 1895 temp64 = cpu_to_be64((req_ctx->assoclen + 1896 GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); 1897 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1898 temp64 = 0; 1899 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); 1900 } 1901 1902 return 0; 1903 } 1904 1905 static void cc_proc_rfc4_gcm(struct aead_request *req) 1906 { 1907 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1908 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1909 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1910 1911 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, 1912 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE); 1913 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, 1914 GCM_BLOCK_RFC4_IV_SIZE); 1915 req->iv = areq_ctx->ctr_iv; 1916 areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; 1917 } 1918 1919 static int cc_proc_aead(struct aead_request *req, 1920 enum drv_crypto_direction direct) 1921 { 1922 int rc = 0; 1923 int seq_len = 0; 1924 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ]; 1925 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1926 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1927 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1928 struct device *dev = drvdata_to_dev(ctx->drvdata); 1929 struct cc_crypto_req cc_req = {}; 1930 1931 dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n", 1932 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"), 1933 ctx, req, req->iv, sg_virt(req->src), req->src->offset, 1934 sg_virt(req->dst), req->dst->offset, req->cryptlen); 1935 1936 /* STAT_PHASE_0: Init and sanity checks */ 1937 1938 /* Check data length according to mode */ 1939 if (validate_data_size(ctx, direct, req)) { 1940 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", 1941 req->cryptlen, areq_ctx->assoclen); 1942 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); 1943 return -EINVAL; 1944 } 1945 1946 /* Setup request structure */ 1947 cc_req.user_cb = (void *)cc_aead_complete; 1948 cc_req.user_arg = (void *)req; 1949 1950 /* Setup request context */ 1951 areq_ctx->gen_ctx.op_type = direct; 1952 areq_ctx->req_authsize = ctx->authsize; 1953 areq_ctx->cipher_mode = ctx->cipher_mode; 1954 1955 /* STAT_PHASE_1: Map buffers */ 1956 1957 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 1958 /* Build CTR IV - Copy nonce from last 4 bytes in 1959 * CTR key to first 4 bytes in CTR IV 1960 */ 1961 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, 1962 CTR_RFC3686_NONCE_SIZE); 1963 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv, 1964 CTR_RFC3686_IV_SIZE); 1965 /* Initialize counter portion of counter block */ 1966 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + 1967 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 1968 1969 /* Replace with counter iv */ 1970 req->iv = areq_ctx->ctr_iv; 1971 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE; 1972 } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) || 1973 (ctx->cipher_mode == DRV_CIPHER_GCTR)) { 1974 areq_ctx->hw_iv_size = AES_BLOCK_SIZE; 1975 if (areq_ctx->ctr_iv != req->iv) { 1976 memcpy(areq_ctx->ctr_iv, req->iv, 1977 crypto_aead_ivsize(tfm)); 1978 req->iv = areq_ctx->ctr_iv; 1979 } 1980 } else { 1981 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm); 1982 } 1983 1984 if (ctx->cipher_mode == DRV_CIPHER_CCM) { 1985 rc = config_ccm_adata(req); 1986 if (rc) { 1987 dev_dbg(dev, "config_ccm_adata() returned with a failure %d!", 1988 rc); 1989 goto exit; 1990 } 1991 } else { 1992 areq_ctx->ccm_hdr_size = ccm_header_size_null; 1993 } 1994 1995 if (ctx->cipher_mode == DRV_CIPHER_GCTR) { 1996 rc = config_gcm_context(req); 1997 if (rc) { 1998 dev_dbg(dev, "config_gcm_context() returned with a failure %d!", 1999 rc); 2000 goto exit; 2001 } 2002 } 2003 2004 rc = cc_map_aead_request(ctx->drvdata, req); 2005 if (rc) { 2006 dev_err(dev, "map_request() failed\n"); 2007 goto exit; 2008 } 2009 2010 /* STAT_PHASE_2: Create sequence */ 2011 2012 /* Load MLLI tables to SRAM if necessary */ 2013 cc_mlli_to_sram(req, desc, &seq_len); 2014 2015 /*TODO: move seq len by reference */ 2016 switch (ctx->auth_mode) { 2017 case DRV_HASH_SHA1: 2018 case DRV_HASH_SHA256: 2019 cc_hmac_authenc(req, desc, &seq_len); 2020 break; 2021 case DRV_HASH_XCBC_MAC: 2022 cc_xcbc_authenc(req, desc, &seq_len); 2023 break; 2024 case DRV_HASH_NULL: 2025 if (ctx->cipher_mode == DRV_CIPHER_CCM) 2026 cc_ccm(req, desc, &seq_len); 2027 if (ctx->cipher_mode == DRV_CIPHER_GCTR) 2028 cc_gcm(req, desc, &seq_len); 2029 break; 2030 default: 2031 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); 2032 cc_unmap_aead_request(dev, req); 2033 rc = -ENOTSUPP; 2034 goto exit; 2035 } 2036 2037 /* STAT_PHASE_3: Lock HW and push sequence */ 2038 2039 rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base); 2040 2041 if (rc != -EINPROGRESS && rc != -EBUSY) { 2042 dev_err(dev, "send_request() failed (rc=%d)\n", rc); 2043 cc_unmap_aead_request(dev, req); 2044 } 2045 2046 exit: 2047 return rc; 2048 } 2049 2050 static int cc_aead_encrypt(struct aead_request *req) 2051 { 2052 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2053 int rc; 2054 2055 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2056 2057 /* No generated IV required */ 2058 areq_ctx->backup_iv = req->iv; 2059 areq_ctx->assoclen = req->assoclen; 2060 areq_ctx->is_gcm4543 = false; 2061 2062 areq_ctx->plaintext_authenticate_only = false; 2063 2064 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); 2065 if (rc != -EINPROGRESS && rc != -EBUSY) 2066 req->iv = areq_ctx->backup_iv; 2067 2068 return rc; 2069 } 2070 2071 static int cc_rfc4309_ccm_encrypt(struct aead_request *req) 2072 { 2073 /* Very similar to cc_aead_encrypt() above. */ 2074 2075 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2076 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2077 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2078 struct device *dev = drvdata_to_dev(ctx->drvdata); 2079 int rc = -EINVAL; 2080 2081 if (!valid_assoclen(req)) { 2082 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); 2083 goto out; 2084 } 2085 2086 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2087 2088 /* No generated IV required */ 2089 areq_ctx->backup_iv = req->iv; 2090 areq_ctx->assoclen = req->assoclen; 2091 areq_ctx->is_gcm4543 = true; 2092 2093 cc_proc_rfc4309_ccm(req); 2094 2095 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); 2096 if (rc != -EINPROGRESS && rc != -EBUSY) 2097 req->iv = areq_ctx->backup_iv; 2098 out: 2099 return rc; 2100 } 2101 2102 static int cc_aead_decrypt(struct aead_request *req) 2103 { 2104 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2105 int rc; 2106 2107 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2108 2109 /* No generated IV required */ 2110 areq_ctx->backup_iv = req->iv; 2111 areq_ctx->assoclen = req->assoclen; 2112 areq_ctx->is_gcm4543 = false; 2113 2114 areq_ctx->plaintext_authenticate_only = false; 2115 2116 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); 2117 if (rc != -EINPROGRESS && rc != -EBUSY) 2118 req->iv = areq_ctx->backup_iv; 2119 2120 return rc; 2121 } 2122 2123 static int cc_rfc4309_ccm_decrypt(struct aead_request *req) 2124 { 2125 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2126 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2127 struct device *dev = drvdata_to_dev(ctx->drvdata); 2128 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2129 int rc = -EINVAL; 2130 2131 if (!valid_assoclen(req)) { 2132 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); 2133 goto out; 2134 } 2135 2136 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2137 2138 /* No generated IV required */ 2139 areq_ctx->backup_iv = req->iv; 2140 areq_ctx->assoclen = req->assoclen; 2141 2142 areq_ctx->is_gcm4543 = true; 2143 cc_proc_rfc4309_ccm(req); 2144 2145 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); 2146 if (rc != -EINPROGRESS && rc != -EBUSY) 2147 req->iv = areq_ctx->backup_iv; 2148 2149 out: 2150 return rc; 2151 } 2152 2153 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, 2154 unsigned int keylen) 2155 { 2156 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2157 struct device *dev = drvdata_to_dev(ctx->drvdata); 2158 2159 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key); 2160 2161 if (keylen < 4) 2162 return -EINVAL; 2163 2164 keylen -= 4; 2165 memcpy(ctx->ctr_nonce, key + keylen, 4); 2166 2167 return cc_aead_setkey(tfm, key, keylen); 2168 } 2169 2170 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, 2171 unsigned int keylen) 2172 { 2173 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2174 struct device *dev = drvdata_to_dev(ctx->drvdata); 2175 2176 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key); 2177 2178 if (keylen < 4) 2179 return -EINVAL; 2180 2181 keylen -= 4; 2182 memcpy(ctx->ctr_nonce, key + keylen, 4); 2183 2184 return cc_aead_setkey(tfm, key, keylen); 2185 } 2186 2187 static int cc_gcm_setauthsize(struct crypto_aead *authenc, 2188 unsigned int authsize) 2189 { 2190 switch (authsize) { 2191 case 4: 2192 case 8: 2193 case 12: 2194 case 13: 2195 case 14: 2196 case 15: 2197 case 16: 2198 break; 2199 default: 2200 return -EINVAL; 2201 } 2202 2203 return cc_aead_setauthsize(authenc, authsize); 2204 } 2205 2206 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc, 2207 unsigned int authsize) 2208 { 2209 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); 2210 struct device *dev = drvdata_to_dev(ctx->drvdata); 2211 2212 dev_dbg(dev, "authsize %d\n", authsize); 2213 2214 switch (authsize) { 2215 case 8: 2216 case 12: 2217 case 16: 2218 break; 2219 default: 2220 return -EINVAL; 2221 } 2222 2223 return cc_aead_setauthsize(authenc, authsize); 2224 } 2225 2226 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc, 2227 unsigned int authsize) 2228 { 2229 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); 2230 struct device *dev = drvdata_to_dev(ctx->drvdata); 2231 2232 dev_dbg(dev, "authsize %d\n", authsize); 2233 2234 if (authsize != 16) 2235 return -EINVAL; 2236 2237 return cc_aead_setauthsize(authenc, authsize); 2238 } 2239 2240 static int cc_rfc4106_gcm_encrypt(struct aead_request *req) 2241 { 2242 /* Very similar to cc_aead_encrypt() above. */ 2243 2244 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2245 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2246 struct device *dev = drvdata_to_dev(ctx->drvdata); 2247 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2248 int rc = -EINVAL; 2249 2250 if (!valid_assoclen(req)) { 2251 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); 2252 goto out; 2253 } 2254 2255 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2256 2257 /* No generated IV required */ 2258 areq_ctx->backup_iv = req->iv; 2259 areq_ctx->assoclen = req->assoclen; 2260 areq_ctx->plaintext_authenticate_only = false; 2261 2262 cc_proc_rfc4_gcm(req); 2263 areq_ctx->is_gcm4543 = true; 2264 2265 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); 2266 if (rc != -EINPROGRESS && rc != -EBUSY) 2267 req->iv = areq_ctx->backup_iv; 2268 out: 2269 return rc; 2270 } 2271 2272 static int cc_rfc4543_gcm_encrypt(struct aead_request *req) 2273 { 2274 /* Very similar to cc_aead_encrypt() above. */ 2275 2276 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2277 int rc; 2278 2279 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2280 2281 //plaintext is not encryped with rfc4543 2282 areq_ctx->plaintext_authenticate_only = true; 2283 2284 /* No generated IV required */ 2285 areq_ctx->backup_iv = req->iv; 2286 areq_ctx->assoclen = req->assoclen; 2287 2288 cc_proc_rfc4_gcm(req); 2289 areq_ctx->is_gcm4543 = true; 2290 2291 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); 2292 if (rc != -EINPROGRESS && rc != -EBUSY) 2293 req->iv = areq_ctx->backup_iv; 2294 2295 return rc; 2296 } 2297 2298 static int cc_rfc4106_gcm_decrypt(struct aead_request *req) 2299 { 2300 /* Very similar to cc_aead_decrypt() above. */ 2301 2302 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2303 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 2304 struct device *dev = drvdata_to_dev(ctx->drvdata); 2305 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2306 int rc = -EINVAL; 2307 2308 if (!valid_assoclen(req)) { 2309 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); 2310 goto out; 2311 } 2312 2313 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2314 2315 /* No generated IV required */ 2316 areq_ctx->backup_iv = req->iv; 2317 areq_ctx->assoclen = req->assoclen; 2318 areq_ctx->plaintext_authenticate_only = false; 2319 2320 cc_proc_rfc4_gcm(req); 2321 areq_ctx->is_gcm4543 = true; 2322 2323 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); 2324 if (rc != -EINPROGRESS && rc != -EBUSY) 2325 req->iv = areq_ctx->backup_iv; 2326 out: 2327 return rc; 2328 } 2329 2330 static int cc_rfc4543_gcm_decrypt(struct aead_request *req) 2331 { 2332 /* Very similar to cc_aead_decrypt() above. */ 2333 2334 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2335 int rc; 2336 2337 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2338 2339 //plaintext is not decryped with rfc4543 2340 areq_ctx->plaintext_authenticate_only = true; 2341 2342 /* No generated IV required */ 2343 areq_ctx->backup_iv = req->iv; 2344 areq_ctx->assoclen = req->assoclen; 2345 2346 cc_proc_rfc4_gcm(req); 2347 areq_ctx->is_gcm4543 = true; 2348 2349 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); 2350 if (rc != -EINPROGRESS && rc != -EBUSY) 2351 req->iv = areq_ctx->backup_iv; 2352 2353 return rc; 2354 } 2355 2356 /* aead alg */ 2357 static struct cc_alg_template aead_algs[] = { 2358 { 2359 .name = "authenc(hmac(sha1),cbc(aes))", 2360 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree", 2361 .blocksize = AES_BLOCK_SIZE, 2362 .template_aead = { 2363 .setkey = cc_aead_setkey, 2364 .setauthsize = cc_aead_setauthsize, 2365 .encrypt = cc_aead_encrypt, 2366 .decrypt = cc_aead_decrypt, 2367 .init = cc_aead_init, 2368 .exit = cc_aead_exit, 2369 .ivsize = AES_BLOCK_SIZE, 2370 .maxauthsize = SHA1_DIGEST_SIZE, 2371 }, 2372 .cipher_mode = DRV_CIPHER_CBC, 2373 .flow_mode = S_DIN_to_AES, 2374 .auth_mode = DRV_HASH_SHA1, 2375 .min_hw_rev = CC_HW_REV_630, 2376 .std_body = CC_STD_NIST, 2377 }, 2378 { 2379 .name = "authenc(hmac(sha1),cbc(des3_ede))", 2380 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree", 2381 .blocksize = DES3_EDE_BLOCK_SIZE, 2382 .template_aead = { 2383 .setkey = cc_des3_aead_setkey, 2384 .setauthsize = cc_aead_setauthsize, 2385 .encrypt = cc_aead_encrypt, 2386 .decrypt = cc_aead_decrypt, 2387 .init = cc_aead_init, 2388 .exit = cc_aead_exit, 2389 .ivsize = DES3_EDE_BLOCK_SIZE, 2390 .maxauthsize = SHA1_DIGEST_SIZE, 2391 }, 2392 .cipher_mode = DRV_CIPHER_CBC, 2393 .flow_mode = S_DIN_to_DES, 2394 .auth_mode = DRV_HASH_SHA1, 2395 .min_hw_rev = CC_HW_REV_630, 2396 .std_body = CC_STD_NIST, 2397 }, 2398 { 2399 .name = "authenc(hmac(sha256),cbc(aes))", 2400 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree", 2401 .blocksize = AES_BLOCK_SIZE, 2402 .template_aead = { 2403 .setkey = cc_aead_setkey, 2404 .setauthsize = cc_aead_setauthsize, 2405 .encrypt = cc_aead_encrypt, 2406 .decrypt = cc_aead_decrypt, 2407 .init = cc_aead_init, 2408 .exit = cc_aead_exit, 2409 .ivsize = AES_BLOCK_SIZE, 2410 .maxauthsize = SHA256_DIGEST_SIZE, 2411 }, 2412 .cipher_mode = DRV_CIPHER_CBC, 2413 .flow_mode = S_DIN_to_AES, 2414 .auth_mode = DRV_HASH_SHA256, 2415 .min_hw_rev = CC_HW_REV_630, 2416 .std_body = CC_STD_NIST, 2417 }, 2418 { 2419 .name = "authenc(hmac(sha256),cbc(des3_ede))", 2420 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree", 2421 .blocksize = DES3_EDE_BLOCK_SIZE, 2422 .template_aead = { 2423 .setkey = cc_des3_aead_setkey, 2424 .setauthsize = cc_aead_setauthsize, 2425 .encrypt = cc_aead_encrypt, 2426 .decrypt = cc_aead_decrypt, 2427 .init = cc_aead_init, 2428 .exit = cc_aead_exit, 2429 .ivsize = DES3_EDE_BLOCK_SIZE, 2430 .maxauthsize = SHA256_DIGEST_SIZE, 2431 }, 2432 .cipher_mode = DRV_CIPHER_CBC, 2433 .flow_mode = S_DIN_to_DES, 2434 .auth_mode = DRV_HASH_SHA256, 2435 .min_hw_rev = CC_HW_REV_630, 2436 .std_body = CC_STD_NIST, 2437 }, 2438 { 2439 .name = "authenc(xcbc(aes),cbc(aes))", 2440 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree", 2441 .blocksize = AES_BLOCK_SIZE, 2442 .template_aead = { 2443 .setkey = cc_aead_setkey, 2444 .setauthsize = cc_aead_setauthsize, 2445 .encrypt = cc_aead_encrypt, 2446 .decrypt = cc_aead_decrypt, 2447 .init = cc_aead_init, 2448 .exit = cc_aead_exit, 2449 .ivsize = AES_BLOCK_SIZE, 2450 .maxauthsize = AES_BLOCK_SIZE, 2451 }, 2452 .cipher_mode = DRV_CIPHER_CBC, 2453 .flow_mode = S_DIN_to_AES, 2454 .auth_mode = DRV_HASH_XCBC_MAC, 2455 .min_hw_rev = CC_HW_REV_630, 2456 .std_body = CC_STD_NIST, 2457 }, 2458 { 2459 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", 2460 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree", 2461 .blocksize = 1, 2462 .template_aead = { 2463 .setkey = cc_aead_setkey, 2464 .setauthsize = cc_aead_setauthsize, 2465 .encrypt = cc_aead_encrypt, 2466 .decrypt = cc_aead_decrypt, 2467 .init = cc_aead_init, 2468 .exit = cc_aead_exit, 2469 .ivsize = CTR_RFC3686_IV_SIZE, 2470 .maxauthsize = SHA1_DIGEST_SIZE, 2471 }, 2472 .cipher_mode = DRV_CIPHER_CTR, 2473 .flow_mode = S_DIN_to_AES, 2474 .auth_mode = DRV_HASH_SHA1, 2475 .min_hw_rev = CC_HW_REV_630, 2476 .std_body = CC_STD_NIST, 2477 }, 2478 { 2479 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", 2480 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree", 2481 .blocksize = 1, 2482 .template_aead = { 2483 .setkey = cc_aead_setkey, 2484 .setauthsize = cc_aead_setauthsize, 2485 .encrypt = cc_aead_encrypt, 2486 .decrypt = cc_aead_decrypt, 2487 .init = cc_aead_init, 2488 .exit = cc_aead_exit, 2489 .ivsize = CTR_RFC3686_IV_SIZE, 2490 .maxauthsize = SHA256_DIGEST_SIZE, 2491 }, 2492 .cipher_mode = DRV_CIPHER_CTR, 2493 .flow_mode = S_DIN_to_AES, 2494 .auth_mode = DRV_HASH_SHA256, 2495 .min_hw_rev = CC_HW_REV_630, 2496 .std_body = CC_STD_NIST, 2497 }, 2498 { 2499 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))", 2500 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree", 2501 .blocksize = 1, 2502 .template_aead = { 2503 .setkey = cc_aead_setkey, 2504 .setauthsize = cc_aead_setauthsize, 2505 .encrypt = cc_aead_encrypt, 2506 .decrypt = cc_aead_decrypt, 2507 .init = cc_aead_init, 2508 .exit = cc_aead_exit, 2509 .ivsize = CTR_RFC3686_IV_SIZE, 2510 .maxauthsize = AES_BLOCK_SIZE, 2511 }, 2512 .cipher_mode = DRV_CIPHER_CTR, 2513 .flow_mode = S_DIN_to_AES, 2514 .auth_mode = DRV_HASH_XCBC_MAC, 2515 .min_hw_rev = CC_HW_REV_630, 2516 .std_body = CC_STD_NIST, 2517 }, 2518 { 2519 .name = "ccm(aes)", 2520 .driver_name = "ccm-aes-ccree", 2521 .blocksize = 1, 2522 .template_aead = { 2523 .setkey = cc_aead_setkey, 2524 .setauthsize = cc_ccm_setauthsize, 2525 .encrypt = cc_aead_encrypt, 2526 .decrypt = cc_aead_decrypt, 2527 .init = cc_aead_init, 2528 .exit = cc_aead_exit, 2529 .ivsize = AES_BLOCK_SIZE, 2530 .maxauthsize = AES_BLOCK_SIZE, 2531 }, 2532 .cipher_mode = DRV_CIPHER_CCM, 2533 .flow_mode = S_DIN_to_AES, 2534 .auth_mode = DRV_HASH_NULL, 2535 .min_hw_rev = CC_HW_REV_630, 2536 .std_body = CC_STD_NIST, 2537 }, 2538 { 2539 .name = "rfc4309(ccm(aes))", 2540 .driver_name = "rfc4309-ccm-aes-ccree", 2541 .blocksize = 1, 2542 .template_aead = { 2543 .setkey = cc_rfc4309_ccm_setkey, 2544 .setauthsize = cc_rfc4309_ccm_setauthsize, 2545 .encrypt = cc_rfc4309_ccm_encrypt, 2546 .decrypt = cc_rfc4309_ccm_decrypt, 2547 .init = cc_aead_init, 2548 .exit = cc_aead_exit, 2549 .ivsize = CCM_BLOCK_IV_SIZE, 2550 .maxauthsize = AES_BLOCK_SIZE, 2551 }, 2552 .cipher_mode = DRV_CIPHER_CCM, 2553 .flow_mode = S_DIN_to_AES, 2554 .auth_mode = DRV_HASH_NULL, 2555 .min_hw_rev = CC_HW_REV_630, 2556 .std_body = CC_STD_NIST, 2557 }, 2558 { 2559 .name = "gcm(aes)", 2560 .driver_name = "gcm-aes-ccree", 2561 .blocksize = 1, 2562 .template_aead = { 2563 .setkey = cc_aead_setkey, 2564 .setauthsize = cc_gcm_setauthsize, 2565 .encrypt = cc_aead_encrypt, 2566 .decrypt = cc_aead_decrypt, 2567 .init = cc_aead_init, 2568 .exit = cc_aead_exit, 2569 .ivsize = 12, 2570 .maxauthsize = AES_BLOCK_SIZE, 2571 }, 2572 .cipher_mode = DRV_CIPHER_GCTR, 2573 .flow_mode = S_DIN_to_AES, 2574 .auth_mode = DRV_HASH_NULL, 2575 .min_hw_rev = CC_HW_REV_630, 2576 .std_body = CC_STD_NIST, 2577 }, 2578 { 2579 .name = "rfc4106(gcm(aes))", 2580 .driver_name = "rfc4106-gcm-aes-ccree", 2581 .blocksize = 1, 2582 .template_aead = { 2583 .setkey = cc_rfc4106_gcm_setkey, 2584 .setauthsize = cc_rfc4106_gcm_setauthsize, 2585 .encrypt = cc_rfc4106_gcm_encrypt, 2586 .decrypt = cc_rfc4106_gcm_decrypt, 2587 .init = cc_aead_init, 2588 .exit = cc_aead_exit, 2589 .ivsize = GCM_BLOCK_RFC4_IV_SIZE, 2590 .maxauthsize = AES_BLOCK_SIZE, 2591 }, 2592 .cipher_mode = DRV_CIPHER_GCTR, 2593 .flow_mode = S_DIN_to_AES, 2594 .auth_mode = DRV_HASH_NULL, 2595 .min_hw_rev = CC_HW_REV_630, 2596 .std_body = CC_STD_NIST, 2597 }, 2598 { 2599 .name = "rfc4543(gcm(aes))", 2600 .driver_name = "rfc4543-gcm-aes-ccree", 2601 .blocksize = 1, 2602 .template_aead = { 2603 .setkey = cc_rfc4543_gcm_setkey, 2604 .setauthsize = cc_rfc4543_gcm_setauthsize, 2605 .encrypt = cc_rfc4543_gcm_encrypt, 2606 .decrypt = cc_rfc4543_gcm_decrypt, 2607 .init = cc_aead_init, 2608 .exit = cc_aead_exit, 2609 .ivsize = GCM_BLOCK_RFC4_IV_SIZE, 2610 .maxauthsize = AES_BLOCK_SIZE, 2611 }, 2612 .cipher_mode = DRV_CIPHER_GCTR, 2613 .flow_mode = S_DIN_to_AES, 2614 .auth_mode = DRV_HASH_NULL, 2615 .min_hw_rev = CC_HW_REV_630, 2616 .std_body = CC_STD_NIST, 2617 }, 2618 }; 2619 2620 static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl, 2621 struct device *dev) 2622 { 2623 struct cc_crypto_alg *t_alg; 2624 struct aead_alg *alg; 2625 2626 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 2627 if (!t_alg) 2628 return ERR_PTR(-ENOMEM); 2629 2630 alg = &tmpl->template_aead; 2631 2632 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 2633 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 2634 tmpl->driver_name); 2635 alg->base.cra_module = THIS_MODULE; 2636 alg->base.cra_priority = CC_CRA_PRIO; 2637 2638 alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx); 2639 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 2640 alg->init = cc_aead_init; 2641 alg->exit = cc_aead_exit; 2642 2643 t_alg->aead_alg = *alg; 2644 2645 t_alg->cipher_mode = tmpl->cipher_mode; 2646 t_alg->flow_mode = tmpl->flow_mode; 2647 t_alg->auth_mode = tmpl->auth_mode; 2648 2649 return t_alg; 2650 } 2651 2652 int cc_aead_free(struct cc_drvdata *drvdata) 2653 { 2654 struct cc_crypto_alg *t_alg, *n; 2655 struct cc_aead_handle *aead_handle = 2656 (struct cc_aead_handle *)drvdata->aead_handle; 2657 2658 if (aead_handle) { 2659 /* Remove registered algs */ 2660 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, 2661 entry) { 2662 crypto_unregister_aead(&t_alg->aead_alg); 2663 list_del(&t_alg->entry); 2664 kfree(t_alg); 2665 } 2666 kfree(aead_handle); 2667 drvdata->aead_handle = NULL; 2668 } 2669 2670 return 0; 2671 } 2672 2673 int cc_aead_alloc(struct cc_drvdata *drvdata) 2674 { 2675 struct cc_aead_handle *aead_handle; 2676 struct cc_crypto_alg *t_alg; 2677 int rc = -ENOMEM; 2678 int alg; 2679 struct device *dev = drvdata_to_dev(drvdata); 2680 2681 aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL); 2682 if (!aead_handle) { 2683 rc = -ENOMEM; 2684 goto fail0; 2685 } 2686 2687 INIT_LIST_HEAD(&aead_handle->aead_list); 2688 drvdata->aead_handle = aead_handle; 2689 2690 aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata, 2691 MAX_HMAC_DIGEST_SIZE); 2692 2693 if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) { 2694 dev_err(dev, "SRAM pool exhausted\n"); 2695 rc = -ENOMEM; 2696 goto fail1; 2697 } 2698 2699 /* Linux crypto */ 2700 for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) { 2701 if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) || 2702 !(drvdata->std_bodies & aead_algs[alg].std_body)) 2703 continue; 2704 2705 t_alg = cc_create_aead_alg(&aead_algs[alg], dev); 2706 if (IS_ERR(t_alg)) { 2707 rc = PTR_ERR(t_alg); 2708 dev_err(dev, "%s alg allocation failed\n", 2709 aead_algs[alg].driver_name); 2710 goto fail1; 2711 } 2712 t_alg->drvdata = drvdata; 2713 rc = crypto_register_aead(&t_alg->aead_alg); 2714 if (rc) { 2715 dev_err(dev, "%s alg registration failed\n", 2716 t_alg->aead_alg.base.cra_driver_name); 2717 goto fail2; 2718 } else { 2719 list_add_tail(&t_alg->entry, &aead_handle->aead_list); 2720 dev_dbg(dev, "Registered %s\n", 2721 t_alg->aead_alg.base.cra_driver_name); 2722 } 2723 } 2724 2725 return 0; 2726 2727 fail2: 2728 kfree(t_alg); 2729 fail1: 2730 cc_aead_free(drvdata); 2731 fail0: 2732 return rc; 2733 } 2734