/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->| ShareDesc   |
 * | *(packet 1) |                     | (hashKey)   |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->| ShareDesc   |
 * | *(packet 2) |      |------------->| (hashKey)   |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
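
/*
 * A job descriptor with the layout shown in the comment above is assembled
 * by the request functions below, roughly as (see ahash_digest() for one
 * instance; SEQ_IN/SEQ_OUT order varies per request type):
 *
 *	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *
 * where ptr is the DMA address of the relevant shared descriptor.
 */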

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Unmap the buffer from any previous use, then put it in the link table
 * only if it currently contains data.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
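
/*
 * Note on buffering: caam_hash_state keeps two bounce buffers (buf_0/buf_1,
 * selected by current_buf).  Each update hashes only whole blocks and copies
 * the trailing partial block into the inactive buffer, which then becomes
 * the active one for the next request; the helpers above fold the active
 * buffer and req->src into a single SEC4 link table for the job ring.
 */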

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
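
/*
 * The "split key" handled above is, roughly, CAAM's precomputed HMAC key:
 * the MDHA unit hashes the key into its inner/outer pad states, and that
 * split_key_pad_len-byte result is what append_key_ahash() loads into the
 * shared descriptors.  gen_split_key() (key_gen.c) runs the precomputation
 * on the device.
 */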

/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}

	/* unmap with the original key length before shrinking *keylen */
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		kfree(hashed_key);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
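
/*
 * The three completion callbacks below follow the same pattern as
 * ahash_done(), but also unmap the running context: ahash_done_bi() for
 * jobs that both read and update it, ahash_done_ctx_src() for jobs that
 * fed it in as part of the input, and ahash_done_ctx_dst() for jobs that
 * wrote a fresh context out.
 */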

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
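
/*
 * Request-path functions.  The "_ctx" variants below assume the running
 * digest already lives in state->caam_ctx; updates hash only whole blocks,
 * carrying any trailing partial block over in the state buffers until more
 * data (or final/finup) arrives.
 */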

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link table entry (ctx, or buf when present) final */
	(edesc->sec4_sg + (buflen ? 1 : 0))->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
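
/*
 * export/import snapshot the whole driver context and request state
 * verbatim (shared descriptors, split key and the buffered partial block
 * included), so an imported request resumes exactly where the exported
 * one left off.
 */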

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);
	int ret = 0;
	struct platform_device *pdev;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	pdev = priv->jrpdev[tgt_jr % priv->total_jobrs];
	ctx->jrdev = &pdev->dev;

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
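
/*
 * Example use from other kernel code, via the generic ahash API (a sketch;
 * any registered hash provider, not just this driver, may serve it):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, priv);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	err = crypto_ahash_digest(req);	// -EINPROGRESS: wait for done()
 *
 * Requesting "sha256-caam" (or "hmac-sha256-caam" plus a setkey) selects
 * this driver explicitly; done, priv, sgl, digest and nbytes above are
 * caller-supplied placeholders.
 */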