ccp-ops.c (bb4e89b34d1bf46156b7e880a0f34205fb7ce2a5) | ccp-ops.c (4b394a232df78414442778b02ca4a388d947d059) |
---|---|
1/* 2 * AMD Cryptographic Coprocessor (CCP) driver 3 * 4 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 * 6 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 * Author: Gary R Hook <gary.hook@amd.com> 8 * --- 7 unchanged lines hidden (view full) --- 16#include <linux/pci.h> 17#include <linux/interrupt.h> 18#include <crypto/scatterwalk.h> 19#include <linux/ccp.h> 20 21#include "ccp-dev.h" 22 23/* SHA initial context values */ | 1/* 2 * AMD Cryptographic Coprocessor (CCP) driver 3 * 4 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 * 6 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 * Author: Gary R Hook <gary.hook@amd.com> 8 * --- 7 unchanged lines hidden (view full) --- 16#include <linux/pci.h> 17#include <linux/interrupt.h> 18#include <crypto/scatterwalk.h> 19#include <linux/ccp.h> 20 21#include "ccp-dev.h" 22 23/* SHA initial context values */ |
24static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 24static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { |
25 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 26 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), | 25 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 26 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), |
27 cpu_to_be32(SHA1_H4), 0, 0, 0, | 27 cpu_to_be32(SHA1_H4), |
28}; 29 | 28}; 29 |
30static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 30static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
31 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 32 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 33 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 34 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 35}; 36 | 31 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 32 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 33 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 34 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 35}; 36 |
37static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 37static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
38 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 39 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 40 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 41 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 42}; 43 | 38 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 39 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 40 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 41 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 42}; 43 |
| 44#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ 45 ccp_gen_jobid(ccp) : 0) 46 |
44static u32 ccp_gen_jobid(struct ccp_device *ccp) 45{ 46 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; 47} 48 49static void ccp_sg_free(struct ccp_sg_workarea *wa) 50{ 51 if (wa->dma_count) --- 430 unchanged lines hidden (view full) --- 482 } 483 484 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 485 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 486 487 ret = -EIO; 488 memset(&op, 0, sizeof(op)); 489 op.cmd_q = cmd_q; | 47static u32 ccp_gen_jobid(struct ccp_device *ccp) 48{ 49 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; 50} 51 52static void ccp_sg_free(struct ccp_sg_workarea *wa) 53{ 54 if (wa->dma_count) --- 430 unchanged lines hidden (view full) --- 485 } 486 487 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 488 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 489 490 ret = -EIO; 491 memset(&op, 0, sizeof(op)); 492 op.cmd_q = cmd_q; |
490 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 493 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
491 op.sb_key = cmd_q->sb_key; 492 op.sb_ctx = cmd_q->sb_ctx; 493 op.init = 1; 494 op.u.aes.type = aes->type; 495 op.u.aes.mode = aes->mode; 496 op.u.aes.action = aes->action; 497 498 /* All supported key sizes fit in a single (32-byte) SB entry --- 136 unchanged lines hidden (view full) --- 635 } 636 637 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 638 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 639 640 ret = -EIO; 641 memset(&op, 0, sizeof(op)); 642 op.cmd_q = cmd_q; | 494 op.sb_key = cmd_q->sb_key; 495 op.sb_ctx = cmd_q->sb_ctx; 496 op.init = 1; 497 op.u.aes.type = aes->type; 498 op.u.aes.mode = aes->mode; 499 op.u.aes.action = aes->action; 500 501 /* All supported key sizes fit in a single (32-byte) SB entry --- 136 unchanged lines hidden (view full) --- 638 } 639 640 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 641 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 642 643 ret = -EIO; 644 memset(&op, 0, sizeof(op)); 645 op.cmd_q = cmd_q; |
643 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 646 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
644 op.sb_key = cmd_q->sb_key; 645 op.sb_ctx = cmd_q->sb_ctx; 646 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; 647 op.u.aes.type = aes->type; 648 op.u.aes.mode = aes->mode; 649 op.u.aes.action = aes->action; 650 651 /* All supported key sizes fit in a single (32-byte) SB entry --- 22 unchanged lines hidden (view full) --- 674 */ 675 ret = ccp_init_dm_workarea(&ctx, cmd_q, 676 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 677 DMA_BIDIRECTIONAL); 678 if (ret) 679 goto e_key; 680 681 if (aes->mode != CCP_AES_MODE_ECB) { | 647 op.sb_key = cmd_q->sb_key; 648 op.sb_ctx = cmd_q->sb_ctx; 649 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; 650 op.u.aes.type = aes->type; 651 op.u.aes.mode = aes->mode; 652 op.u.aes.action = aes->action; 653 654 /* All supported key sizes fit in a single (32-byte) SB entry --- 22 unchanged lines hidden (view full) --- 677 */ 678 ret = ccp_init_dm_workarea(&ctx, cmd_q, 679 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 680 DMA_BIDIRECTIONAL); 681 if (ret) 682 goto e_key; 683 684 if (aes->mode != CCP_AES_MODE_ECB) { |
682 /* Load the AES context - conver to LE */ | 685 /* Load the AES context - convert to LE */ |
683 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 684 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 685 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 686 CCP_PASSTHRU_BYTESWAP_256BIT); 687 if (ret) { 688 cmd->engine_error = cmd_q->cmd_error; 689 goto e_ctx; 690 } --- 121 unchanged lines hidden (view full) --- 812 return -EINVAL; 813 814 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); 815 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); 816 817 ret = -EIO; 818 memset(&op, 0, sizeof(op)); 819 op.cmd_q = cmd_q; | 686 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 687 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 688 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 689 CCP_PASSTHRU_BYTESWAP_256BIT); 690 if (ret) { 691 cmd->engine_error = cmd_q->cmd_error; 692 goto e_ctx; 693 } --- 121 unchanged lines hidden (view full) --- 815 return -EINVAL; 816 817 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); 818 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); 819 820 ret = -EIO; 821 memset(&op, 0, sizeof(op)); 822 op.cmd_q = cmd_q; |
820 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 823 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
821 op.sb_key = cmd_q->sb_key; 822 op.sb_ctx = cmd_q->sb_ctx; 823 op.init = 1; 824 op.u.xts.action = xts->action; 825 op.u.xts.unit_size = xts->unit_size; 826 827 /* All supported key sizes fit in a single (32-byte) SB entry 828 * and must be in little endian format. Use the 256-bit byte --- 102 unchanged lines hidden (view full) --- 931} 932 933static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 934{ 935 struct ccp_sha_engine *sha = &cmd->u.sha; 936 struct ccp_dm_workarea ctx; 937 struct ccp_data src; 938 struct ccp_op op; | 824 op.sb_key = cmd_q->sb_key; 825 op.sb_ctx = cmd_q->sb_ctx; 826 op.init = 1; 827 op.u.xts.action = xts->action; 828 op.u.xts.unit_size = xts->unit_size; 829 830 /* All supported key sizes fit in a single (32-byte) SB entry 831 * and must be in little endian format. Use the 256-bit byte --- 102 unchanged lines hidden (view full) --- 934} 935 936static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 937{ 938 struct ccp_sha_engine *sha = &cmd->u.sha; 939 struct ccp_dm_workarea ctx; 940 struct ccp_data src; 941 struct ccp_op op; |
| 942 unsigned int ioffset, ooffset; 943 unsigned int digest_size; 944 int sb_count; 945 const void *init; 946 u64 block_size; 947 int ctx_size; |
939 int ret; 940 | 948 int ret; 949 |
941 if (sha->ctx_len != CCP_SHA_CTXSIZE) | 950 switch (sha->type) { 951 case CCP_SHA_TYPE_1: 952 if (sha->ctx_len < SHA1_DIGEST_SIZE) 953 return -EINVAL; 954 block_size = SHA1_BLOCK_SIZE; 955 break; 956 case CCP_SHA_TYPE_224: 957 if (sha->ctx_len < SHA224_DIGEST_SIZE) 958 return -EINVAL; 959 block_size = SHA224_BLOCK_SIZE; 960 break; 961 case CCP_SHA_TYPE_256: 962 if (sha->ctx_len < SHA256_DIGEST_SIZE) 963 return -EINVAL; 964 block_size = SHA256_BLOCK_SIZE; 965 break; 966 default: |
942 return -EINVAL; | 967 return -EINVAL; |
| 968 } |
943 944 if (!sha->ctx) 945 return -EINVAL; 946 | 969 970 if (!sha->ctx) 971 return -EINVAL; 972 |
947 if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) | 973 if (!sha->final && (sha->src_len & (block_size - 1))) |
948 return -EINVAL; 949 | 974 return -EINVAL; 975 |
950 if (!sha->src_len) { 951 const u8 *sha_zero; | 976 /* The version 3 device can't handle zero-length input */ 977 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { |
952 | 978 |
953 /* Not final, just return */ 954 if (!sha->final) 955 return 0; | 979 if (!sha->src_len) { 980 unsigned int digest_len; 981 const u8 *sha_zero; |
956 | 982 |
957 /* CCP can't do a zero length sha operation so the caller 958 * must buffer the data. 959 */ 960 if (sha->msg_bits) 961 return -EINVAL; | 983 /* Not final, just return */ 984 if (!sha->final) 985 return 0; |
962 | 986 |
963 /* The CCP cannot perform zero-length sha operations so the 964 * caller is required to buffer data for the final operation. 965 * However, a sha operation for a message with a total length 966 * of zero is valid so known values are required to supply 967 * the result. 968 */ 969 switch (sha->type) { 970 case CCP_SHA_TYPE_1: 971 sha_zero = sha1_zero_message_hash; 972 break; 973 case CCP_SHA_TYPE_224: 974 sha_zero = sha224_zero_message_hash; 975 break; 976 case CCP_SHA_TYPE_256: 977 sha_zero = sha256_zero_message_hash; 978 break; 979 default: 980 return -EINVAL; 981 } | 987 /* CCP can't do a zero length sha operation so the 988 * caller must buffer the data. 989 */ 990 if (sha->msg_bits) 991 return -EINVAL; |
982 | 992 |
983 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, 984 sha->ctx_len, 1); | 993 /* The CCP cannot perform zero-length sha operations 994 * so the caller is required to buffer data for the 995 * final operation. However, a sha operation for a 996 * message with a total length of zero is valid so 997 * known values are required to supply the result. 998 */ 999 switch (sha->type) { 1000 case CCP_SHA_TYPE_1: 1001 sha_zero = sha1_zero_message_hash; 1002 digest_len = SHA1_DIGEST_SIZE; 1003 break; 1004 case CCP_SHA_TYPE_224: 1005 sha_zero = sha224_zero_message_hash; 1006 digest_len = SHA224_DIGEST_SIZE; 1007 break; 1008 case CCP_SHA_TYPE_256: 1009 sha_zero = sha256_zero_message_hash; 1010 digest_len = SHA256_DIGEST_SIZE; 1011 break; 1012 default: 1013 return -EINVAL; 1014 } |
985 | 1015 |
986 return 0; | 1016 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, 1017 digest_len, 1); 1018 1019 return 0; 1020 } |
987 } 988 | 1021 } 1022 |
989 if (!sha->src) | 1023 /* Set variables used throughout */ 1024 switch (sha->type) { 1025 case CCP_SHA_TYPE_1: 1026 digest_size = SHA1_DIGEST_SIZE; 1027 init = (void *) ccp_sha1_init; 1028 ctx_size = SHA1_DIGEST_SIZE; 1029 sb_count = 1; 1030 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1031 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; 1032 else 1033 ooffset = ioffset = 0; 1034 break; 1035 case CCP_SHA_TYPE_224: 1036 digest_size = SHA224_DIGEST_SIZE; 1037 init = (void *) ccp_sha224_init; 1038 ctx_size = SHA256_DIGEST_SIZE; 1039 sb_count = 1; 1040 ioffset = 0; 1041 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1042 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; 1043 else 1044 ooffset = 0; 1045 break; 1046 case CCP_SHA_TYPE_256: 1047 digest_size = SHA256_DIGEST_SIZE; 1048 init = (void *) ccp_sha256_init; 1049 ctx_size = SHA256_DIGEST_SIZE; 1050 sb_count = 1; 1051 ooffset = ioffset = 0; 1052 break; 1053 default: 1054 ret = -EINVAL; 1055 goto e_data; 1056 } 1057 1058 /* For zero-length plaintext the src pointer is ignored; 1059 * otherwise both parts must be valid 1060 */ 1061 if (sha->src_len && !sha->src) |
990 return -EINVAL; 991 | 1062 return -EINVAL; 1063 |
992 BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1); 993 | |
994 memset(&op, 0, sizeof(op)); 995 op.cmd_q = cmd_q; | 1064 memset(&op, 0, sizeof(op)); 1065 op.cmd_q = cmd_q; |
996 op.jobid = ccp_gen_jobid(cmd_q->ccp); 997 op.sb_ctx = cmd_q->sb_ctx; | 1066 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1067 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ |
998 op.u.sha.type = sha->type; 999 op.u.sha.msg_bits = sha->msg_bits; 1000 | 1068 op.u.sha.type = sha->type; 1069 op.u.sha.msg_bits = sha->msg_bits; 1070 |
1001 /* The SHA context fits in a single (32-byte) SB entry and 1002 * must be in little endian format. Use the 256-bit byte swap 1003 * passthru option to convert from big endian to little endian. 1004 */ 1005 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1006 CCP_SHA_SB_COUNT * CCP_SB_BYTES, | 1071 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, |
1007 DMA_BIDIRECTIONAL); 1008 if (ret) 1009 return ret; | 1072 DMA_BIDIRECTIONAL); 1073 if (ret) 1074 return ret; |
1010 | |
1011 if (sha->first) { | 1075 if (sha->first) { |
1012 const __be32 *init; 1013 | |
1014 switch (sha->type) { 1015 case CCP_SHA_TYPE_1: | 1076 switch (sha->type) { 1077 case CCP_SHA_TYPE_1: |
1016 init = ccp_sha1_init; 1017 break; | |
1018 case CCP_SHA_TYPE_224: | 1078 case CCP_SHA_TYPE_224: |
1019 init = ccp_sha224_init; 1020 break; | |
1021 case CCP_SHA_TYPE_256: | 1079 case CCP_SHA_TYPE_256: |
1022 init = ccp_sha256_init; | 1080 memcpy(ctx.address + ioffset, init, ctx_size); |
1023 break; 1024 default: 1025 ret = -EINVAL; 1026 goto e_ctx; 1027 } | 1081 break; 1082 default: 1083 ret = -EINVAL; 1084 goto e_ctx; 1085 } |
1028 memcpy(ctx.address, init, CCP_SHA_CTXSIZE); | |
1029 } else { | 1086 } else { |
1030 ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1087 /* Restore the context */ 1088 ccp_set_dm_area(&ctx, 0, sha->ctx, 0, 1089 sb_count * CCP_SB_BYTES); |
1031 } 1032 1033 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1034 CCP_PASSTHRU_BYTESWAP_256BIT); 1035 if (ret) { 1036 cmd->engine_error = cmd_q->cmd_error; 1037 goto e_ctx; 1038 } 1039 | 1090 } 1091 1092 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1093 CCP_PASSTHRU_BYTESWAP_256BIT); 1094 if (ret) { 1095 cmd->engine_error = cmd_q->cmd_error; 1096 goto e_ctx; 1097 } 1098 |
1040 /* Send data to the CCP SHA engine */ 1041 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, 1042 CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); 1043 if (ret) 1044 goto e_ctx; | 1099 if (sha->src) { 1100 /* Send data to the CCP SHA engine; block_size is set above */ 1101 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, 1102 block_size, DMA_TO_DEVICE); 1103 if (ret) 1104 goto e_ctx; |
1045 | 1105 |
1046 while (src.sg_wa.bytes_left) { 1047 ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); 1048 if (sha->final && !src.sg_wa.bytes_left) 1049 op.eom = 1; | 1106 while (src.sg_wa.bytes_left) { 1107 ccp_prepare_data(&src, NULL, &op, block_size, false); 1108 if (sha->final && !src.sg_wa.bytes_left) 1109 op.eom = 1; |
1050 | 1110 |
| 1111 ret = cmd_q->ccp->vdata->perform->sha(&op); 1112 if (ret) { 1113 cmd->engine_error = cmd_q->cmd_error; 1114 goto e_data; 1115 } 1116 1117 ccp_process_data(&src, NULL, &op); 1118 } 1119 } else { 1120 op.eom = 1; |
1051 ret = cmd_q->ccp->vdata->perform->sha(&op); 1052 if (ret) { 1053 cmd->engine_error = cmd_q->cmd_error; 1054 goto e_data; 1055 } | 1121 ret = cmd_q->ccp->vdata->perform->sha(&op); 1122 if (ret) { 1123 cmd->engine_error = cmd_q->cmd_error; 1124 goto e_data; 1125 } |
1056 1057 ccp_process_data(&src, NULL, &op); | |
1058 } 1059 1060 /* Retrieve the SHA context - convert from LE to BE using 1061 * 32-byte (256-bit) byteswapping to BE 1062 */ 1063 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1064 CCP_PASSTHRU_BYTESWAP_256BIT); 1065 if (ret) { 1066 cmd->engine_error = cmd_q->cmd_error; 1067 goto e_data; 1068 } 1069 | 1126 } 1127 1128 /* Retrieve the SHA context - convert from LE to BE using 1129 * 32-byte (256-bit) byteswapping to BE 1130 */ 1131 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1132 CCP_PASSTHRU_BYTESWAP_256BIT); 1133 if (ret) { 1134 cmd->engine_error = cmd_q->cmd_error; 1135 goto e_data; 1136 } 1137 |
1070 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); 1071 1072 if (sha->final && sha->opad) { 1073 /* HMAC operation, recursively perform final SHA */ 1074 struct ccp_cmd hmac_cmd; 1075 struct scatterlist sg; 1076 u64 block_size, digest_size; 1077 u8 *hmac_buf; 1078 | 1138 if (sha->final) { 1139 /* Finishing up, so get the digest */ |
1079 switch (sha->type) { 1080 case CCP_SHA_TYPE_1: | 1140 switch (sha->type) { 1141 case CCP_SHA_TYPE_1: |
1081 block_size = SHA1_BLOCK_SIZE; 1082 digest_size = SHA1_DIGEST_SIZE; 1083 break; | |
1084 case CCP_SHA_TYPE_224: | 1142 case CCP_SHA_TYPE_224: |
1085 block_size = SHA224_BLOCK_SIZE; 1086 digest_size = SHA224_DIGEST_SIZE; 1087 break; | |
1088 case CCP_SHA_TYPE_256: | 1143 case CCP_SHA_TYPE_256: |
1089 block_size = SHA256_BLOCK_SIZE; 1090 digest_size = SHA256_DIGEST_SIZE; | 1144 ccp_get_dm_area(&ctx, ooffset, 1145 sha->ctx, 0, 1146 digest_size); |
1091 break; 1092 default: 1093 ret = -EINVAL; | 1147 break; 1148 default: 1149 ret = -EINVAL; |
1094 goto e_data; | 1150 goto e_ctx; |
1095 } | 1151 } |
| 1152 } else { 1153 /* Stash the context */ 1154 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, 1155 sb_count * CCP_SB_BYTES); 1156 } |
1096 | 1157 |
| 1158 if (sha->final && sha->opad) { 1159 /* HMAC operation, recursively perform final SHA */ 1160 struct ccp_cmd hmac_cmd; 1161 struct scatterlist sg; 1162 u8 *hmac_buf; 1163 |
1097 if (sha->opad_len != block_size) { 1098 ret = -EINVAL; 1099 goto e_data; 1100 } 1101 1102 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1103 if (!hmac_buf) { 1104 ret = -ENOMEM; 1105 goto e_data; 1106 } 1107 sg_init_one(&sg, hmac_buf, block_size + digest_size); 1108 1109 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); | 1164 if (sha->opad_len != block_size) { 1165 ret = -EINVAL; 1166 goto e_data; 1167 } 1168 1169 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1170 if (!hmac_buf) { 1171 ret = -ENOMEM; 1172 goto e_data; 1173 } 1174 sg_init_one(&sg, hmac_buf, block_size + digest_size); 1175 1176 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); |
1110 memcpy(hmac_buf + block_size, ctx.address, digest_size); | 1177 switch (sha->type) { 1178 case CCP_SHA_TYPE_1: 1179 case CCP_SHA_TYPE_224: 1180 case CCP_SHA_TYPE_256: 1181 memcpy(hmac_buf + block_size, 1182 ctx.address + ooffset, 1183 digest_size); 1184 break; 1185 default: 1186 ret = -EINVAL; 1187 goto e_ctx; 1188 } |
1111 1112 memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1113 hmac_cmd.engine = CCP_ENGINE_SHA; 1114 hmac_cmd.u.sha.type = sha->type; 1115 hmac_cmd.u.sha.ctx = sha->ctx; 1116 hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1117 hmac_cmd.u.sha.src = &sg; 1118 hmac_cmd.u.sha.src_len = block_size + digest_size; --- 6 unchanged lines hidden (view full) --- 1125 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1126 if (ret) 1127 cmd->engine_error = hmac_cmd.engine_error; 1128 1129 kfree(hmac_buf); 1130 } 1131 1132e_data: | 1189 1190 memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1191 hmac_cmd.engine = CCP_ENGINE_SHA; 1192 hmac_cmd.u.sha.type = sha->type; 1193 hmac_cmd.u.sha.ctx = sha->ctx; 1194 hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1195 hmac_cmd.u.sha.src = &sg; 1196 hmac_cmd.u.sha.src_len = block_size + digest_size; --- 6 unchanged lines hidden (view full) --- 1203 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1204 if (ret) 1205 cmd->engine_error = hmac_cmd.engine_error; 1206 1207 kfree(hmac_buf); 1208 } 1209 1210e_data: |
1133 ccp_free_data(&src, cmd_q); | 1211 if (sha->src) 1212 ccp_free_data(&src, cmd_q); |
1134 1135e_ctx: 1136 ccp_dm_free(&ctx); 1137 1138 return ret; 1139} 1140 1141static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) --- 114 unchanged lines hidden (view full) --- 1256 struct ccp_cmd *cmd) 1257{ 1258 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1259 struct ccp_dm_workarea mask; 1260 struct ccp_data src, dst; 1261 struct ccp_op op; 1262 bool in_place = false; 1263 unsigned int i; | 1213 1214e_ctx: 1215 ccp_dm_free(&ctx); 1216 1217 return ret; 1218} 1219 1220static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) --- 114 unchanged lines hidden (view full) --- 1335 struct ccp_cmd *cmd) 1336{ 1337 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1338 struct ccp_dm_workarea mask; 1339 struct ccp_data src, dst; 1340 struct ccp_op op; 1341 bool in_place = false; 1342 unsigned int i; |
1264 int ret; | 1343 int ret = 0; |
1265 1266 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 1267 return -EINVAL; 1268 1269 if (!pt->src || !pt->dst) 1270 return -EINVAL; 1271 1272 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1273 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 1274 return -EINVAL; 1275 if (!pt->mask) 1276 return -EINVAL; 1277 } 1278 1279 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 1280 1281 memset(&op, 0, sizeof(op)); 1282 op.cmd_q = cmd_q; | 1344 1345 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 1346 return -EINVAL; 1347 1348 if (!pt->src || !pt->dst) 1349 return -EINVAL; 1350 1351 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1352 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 1353 return -EINVAL; 1354 if (!pt->mask) 1355 return -EINVAL; 1356 } 1357 1358 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 1359 1360 memset(&op, 0, sizeof(op)); 1361 op.cmd_q = cmd_q; |
1283 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1362 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1284 1285 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1286 /* Load the mask */ 1287 op.sb_key = cmd_q->sb_key; 1288 1289 ret = ccp_init_dm_workarea(&mask, cmd_q, 1290 CCP_PASSTHRU_SB_COUNT * 1291 CCP_SB_BYTES, --- 172 unchanged lines hidden (view full) --- 1464 return -EINVAL; 1465 1466 if (!ecc->u.mm.result || 1467 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) 1468 return -EINVAL; 1469 1470 memset(&op, 0, sizeof(op)); 1471 op.cmd_q = cmd_q; | 1363 1364 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1365 /* Load the mask */ 1366 op.sb_key = cmd_q->sb_key; 1367 1368 ret = ccp_init_dm_workarea(&mask, cmd_q, 1369 CCP_PASSTHRU_SB_COUNT * 1370 CCP_SB_BYTES, --- 172 unchanged lines hidden (view full) --- 1543 return -EINVAL; 1544 1545 if (!ecc->u.mm.result || 1546 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) 1547 return -EINVAL; 1548 1549 memset(&op, 0, sizeof(op)); 1550 op.cmd_q = cmd_q; |
1472 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1551 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1473 1474 /* Concatenate the modulus and the operands. Both the modulus and 1475 * the operands must be in little endian format. Since the input 1476 * is in big endian format it must be converted and placed in a 1477 * fixed length buffer. 1478 */ 1479 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 1480 DMA_TO_DEVICE); --- 108 unchanged lines hidden (view full) --- 1589 if (!ecc->u.pm.result.x || 1590 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || 1591 !ecc->u.pm.result.y || 1592 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) 1593 return -EINVAL; 1594 1595 memset(&op, 0, sizeof(op)); 1596 op.cmd_q = cmd_q; | 1552 1553 /* Concatenate the modulus and the operands. Both the modulus and 1554 * the operands must be in little endian format. Since the input 1555 * is in big endian format it must be converted and placed in a 1556 * fixed length buffer. 1557 */ 1558 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 1559 DMA_TO_DEVICE); --- 108 unchanged lines hidden (view full) --- 1668 if (!ecc->u.pm.result.x || 1669 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || 1670 !ecc->u.pm.result.y || 1671 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) 1672 return -EINVAL; 1673 1674 memset(&op, 0, sizeof(op)); 1675 op.cmd_q = cmd_q; |
1597 op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1676 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1598 1599 /* Concatenate the modulus and the operands. Both the modulus and 1600 * the operands must be in little endian format. Since the input 1601 * is in big endian format it must be converted and placed in a 1602 * fixed length buffer. 1603 */ 1604 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 1605 DMA_TO_DEVICE); --- 21 unchanged lines hidden (view full) --- 1627 src.address += CCP_ECC_OPERAND_SIZE; 1628 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, 1629 ecc->u.pm.point_1.y_len, 1630 CCP_ECC_OPERAND_SIZE, false); 1631 if (ret) 1632 goto e_src; 1633 src.address += CCP_ECC_OPERAND_SIZE; 1634 | 1677 1678 /* Concatenate the modulus and the operands. Both the modulus and 1679 * the operands must be in little endian format. Since the input 1680 * is in big endian format it must be converted and placed in a 1681 * fixed length buffer. 1682 */ 1683 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 1684 DMA_TO_DEVICE); --- 21 unchanged lines hidden (view full) --- 1706 src.address += CCP_ECC_OPERAND_SIZE; 1707 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, 1708 ecc->u.pm.point_1.y_len, 1709 CCP_ECC_OPERAND_SIZE, false); 1710 if (ret) 1711 goto e_src; 1712 src.address += CCP_ECC_OPERAND_SIZE; 1713 |
1635 /* Set the first point Z coordianate to 1 */ | 1714 /* Set the first point Z coordinate to 1 */ |
1636 *src.address = 0x01; 1637 src.address += CCP_ECC_OPERAND_SIZE; 1638 1639 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { 1640 /* Copy the second point X and Y coordinate */ 1641 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, 1642 ecc->u.pm.point_2.x_len, 1643 CCP_ECC_OPERAND_SIZE, false); 1644 if (ret) 1645 goto e_src; 1646 src.address += CCP_ECC_OPERAND_SIZE; 1647 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, 1648 ecc->u.pm.point_2.y_len, 1649 CCP_ECC_OPERAND_SIZE, false); 1650 if (ret) 1651 goto e_src; 1652 src.address += CCP_ECC_OPERAND_SIZE; 1653 | 1715 *src.address = 0x01; 1716 src.address += CCP_ECC_OPERAND_SIZE; 1717 1718 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { 1719 /* Copy the second point X and Y coordinate */ 1720 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, 1721 ecc->u.pm.point_2.x_len, 1722 CCP_ECC_OPERAND_SIZE, false); 1723 if (ret) 1724 goto e_src; 1725 src.address += CCP_ECC_OPERAND_SIZE; 1726 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, 1727 ecc->u.pm.point_2.y_len, 1728 CCP_ECC_OPERAND_SIZE, false); 1729 if (ret) 1730 goto e_src; 1731 src.address += CCP_ECC_OPERAND_SIZE; 1732 |
1654 /* Set the second point Z coordianate to 1 */ | 1733 /* Set the second point Z coordinate to 1 */ |
1655 *src.address = 0x01; 1656 src.address += CCP_ECC_OPERAND_SIZE; 1657 } else { 1658 /* Copy the Domain "a" parameter */ 1659 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, 1660 ecc->u.pm.domain_a_len, 1661 CCP_ECC_OPERAND_SIZE, false); 1662 if (ret) --- 135 unchanged lines hidden --- | 1734 *src.address = 0x01; 1735 src.address += CCP_ECC_OPERAND_SIZE; 1736 } else { 1737 /* Copy the Domain "a" parameter */ 1738 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, 1739 ecc->u.pm.domain_a_len, 1740 CCP_ECC_OPERAND_SIZE, false); 1741 if (ret) --- 135 unchanged lines hidden --- |
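
The largest change above is in ccp_run_sha_cmd(): the per-algorithm constants (digest size, block size, context size) and the context offsets now depend on both the SHA type and the device version. The sketch below restates that setup as a standalone program so the offset arithmetic is easy to check. It is only an illustration, assuming the usual SHA sizes from <crypto/sha.h> and the 32-byte storage-block entry the driver comments describe; the enum, struct, and sha_layout_for() helper are invented for this example and are not part of the driver.

```c
#include <stdio.h>

/* Local stand-ins for kernel constants (values as in <crypto/sha.h>). */
#define SHA1_DIGEST_SIZE    20
#define SHA224_DIGEST_SIZE  28
#define SHA256_DIGEST_SIZE  32
#define SHA1_BLOCK_SIZE     64
#define SHA224_BLOCK_SIZE   64
#define SHA256_BLOCK_SIZE   64
#define CCP_SB_BYTES        32   /* one 32-byte storage-block (SB) entry */

enum sha_type { SHA_TYPE_1, SHA_TYPE_224, SHA_TYPE_256 };

struct sha_layout {
	unsigned int digest_size;   /* bytes of digest copied back to the caller */
	unsigned int block_size;    /* hashing block size used for src buffering */
	unsigned int ctx_size;      /* bytes of running hash state */
	unsigned int ioffset;       /* offset of the initial context in the SB entry */
	unsigned int ooffset;       /* offset the final digest is read back from */
};

/*
 * Mirror of the "Set variables used throughout" switch in the new
 * ccp_run_sha_cmd(): on a version 3.0 device everything sits at offset 0
 * in the SB entry, while newer devices keep the SHA-1/SHA-224 state at
 * the tail of the entry, so the digest is read at CCP_SB_BYTES - size.
 */
static struct sha_layout sha_layout_for(enum sha_type type, int is_v3)
{
	struct sha_layout l = { 0 };

	switch (type) {
	case SHA_TYPE_1:
		l.digest_size = SHA1_DIGEST_SIZE;
		l.block_size = SHA1_BLOCK_SIZE;
		l.ctx_size = SHA1_DIGEST_SIZE;
		l.ioffset = l.ooffset =
			is_v3 ? 0 : CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		break;
	case SHA_TYPE_224:
		l.digest_size = SHA224_DIGEST_SIZE;
		l.block_size = SHA224_BLOCK_SIZE;
		l.ctx_size = SHA256_DIGEST_SIZE; /* full SHA-256 state is kept */
		l.ioffset = 0;
		l.ooffset = is_v3 ? 0 : CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		break;
	case SHA_TYPE_256:
		l.digest_size = SHA256_DIGEST_SIZE;
		l.block_size = SHA256_BLOCK_SIZE;
		l.ctx_size = SHA256_DIGEST_SIZE;
		l.ioffset = l.ooffset = 0;
		break;
	}
	return l;
}

int main(void)
{
	const char *names[] = { "SHA-1", "SHA-224", "SHA-256" };
	int is_v3;

	for (is_v3 = 1; is_v3 >= 0; is_v3--) {
		printf("%s device:\n", is_v3 ? "v3" : "v5");
		for (int t = SHA_TYPE_1; t <= SHA_TYPE_256; t++) {
			struct sha_layout l = sha_layout_for(t, is_v3);

			printf("  %-7s digest=%2u block=%2u ctx=%2u "
			       "ioffset=%2u ooffset=%2u\n",
			       names[t], l.digest_size, l.block_size,
			       l.ctx_size, l.ioffset, l.ooffset);
		}
	}
	return 0;
}
```

On newer parts the SHA-1 and SHA-224 state occupies the tail of the 32-byte entry, which is why the new code copies the digest out at ooffset = CCP_SB_BYTES - digest_size rather than at offset 0, and why the zero-length-message special case is now applied only when the device version is CCP_VERSION(3, 0).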