// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

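/* Load/store helpers for operands that the hardware expects in little-endian
 * order (the RSA and ECC paths below receive big-endian input). These behave
 * like ccp_set_dm_area()/ccp_get_dm_area() but reverse the byte order in
 * place using a pairwise XOR swap from both ends of the copied region.
 */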
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

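/* Note: ccp_prepare_data() picks the source/destination DMA addresses for the
 * next unit of work. When the current scatterlist entry holds less than a
 * full block, the data is staged through the dm workarea buffer instead (and
 * op->soc is set so the command stalls until completion); otherwise the
 * mapped scatterlist entry is used directly, rounded down to a whole number
 * of blocks.
 */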
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	__be64 *final;
	unsigned int dm_offset;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - AES_BLOCK_SIZE;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
			op.init = 0;
		}
	}

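	/* Note: in the GCTR pass below, a trailing partial block is handled
	 * by setting op.eom and loading op.u.aes.size with the bit length of
	 * the valid data minus one; full blocks leave op.u.aes.size at zero.
	 */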
	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = aes->src_len
						      % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
		if (ret) {
			ccp_dm_free(&tag);
			goto e_tag;
		}

		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (aes->src_len && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (aes->src_len)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (aes->mode == CCP_AES_MODE_GCM)
		return ccp_run_aes_gcm_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
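	/*
	 * i.e. for the 24-byte key K1||K2||K3, the 32-byte workarea ends up
	 * holding, from low to high offset: 8 bytes of zero padding, then K3,
	 * then K2, then K1, as staged by the three copies below.
	 */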
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation.  However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
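	/* Concretely, for SHA-384/512 the 64-byte state (words H0..H7) is
	 * split so that H4..H7 land in the first 32-byte LSB slot and H0..H3
	 * in the second, matching the two memcpy() calls below that stage
	 * the initial values.
	 */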
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

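		/* hmac_buf holds the outer-hash input: one block of opad
		 * followed by the intermediate digest just computed, hashed
		 * below as a fresh (block_size + digest_size)-byte message.
		 */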
		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

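	/* For example, a 2048-bit modulus gives o_len = 256 bytes and
	 * i_len = 512 bytes for the combined modulus-plus-message buffer
	 * built below.
	 */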
	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

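/* Run an ECC modular math operation (modular multiply, add or invert).
 * The modulus and operands are byte-reversed into fixed-size slots of a
 * single source buffer, and the little endian result is reversed back
 * into the caller's buffer on success.
 */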
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

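/* Run an ECC point math operation (point add, multiply or double). The
 * modulus, the point coordinates and, depending on the function, the
 * second point or the domain "a" parameter and scalar are byte-reversed
 * into fixed-size slots of the source buffer; the resulting X and Y
 * coordinates are reversed back into the caller's buffers on success.
 */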
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

/* Validate the ECC modulus and dispatch to the modular math or point
 * math handler based on the requested function.
 */
static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

/* Execute a single command on the given command queue, dispatching on
 * the requested engine. Per-command and per-queue error state is reset
 * before the command runs.
 */
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}