// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
                crypto_skcipher_reqtfm(req));
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

        if (ret)
                return ret;

        if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
                memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

        return 0;
}

static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          unsigned int key_len)
{
        struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

        switch (key_len) {
        case AES_KEYSIZE_128:
                ctx->u.aes.type = CCP_AES_TYPE_128;
                break;
        case AES_KEYSIZE_192:
                ctx->u.aes.type = CCP_AES_TYPE_192;
                break;
        case AES_KEYSIZE_256:
                ctx->u.aes.type = CCP_AES_TYPE_256;
                break;
        default:
                return -EINVAL;
        }
        ctx->u.aes.mode = alg->mode;
        ctx->u.aes.key_len = key_len;

        memcpy(ctx->u.aes.key, key, key_len);
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return 0;
}

static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct scatterlist *iv_sg = NULL;
        unsigned int iv_len = 0;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
             (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
            (req->cryptlen & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
                if (!req->iv)
                        return -EINVAL;

                memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
                iv_sg = &rctx->iv_sg;
                iv_len = AES_BLOCK_SIZE;
                sg_init_one(iv_sg, rctx->iv, iv_len);
        }

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action =
                (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
        rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.aes.iv = iv_sg;
        rctx->cmd.u.aes.iv_len = iv_len;
        rctx->cmd.u.aes.src = req->src;
        rctx->cmd.u.aes.src_len = req->cryptlen;
        rctx->cmd.u.aes.dst = req->dst;

        return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_aes_encrypt(struct skcipher_request *req)
{
        return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct skcipher_request *req)
{
        return ccp_aes_crypt(req, false);
}

static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

        ctx->complete = ccp_aes_complete;
        ctx->u.aes.key_len = 0;

        crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

        return 0;
}

static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
                                    int ret)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

        /* Restore the original pointer */
        req->iv = rctx->rfc3686_info;

        return ccp_aes_complete(async_req, ret);
}

static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                  unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;

        key_len -= CTR_RFC3686_NONCE_SIZE;
        memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

        return ccp_aes_setkey(tfm, key, key_len);
}

static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
        u8 *iv;

        /* Initialize the CTR block */
        iv = rctx->rfc3686_iv;
        memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

        iv += CTR_RFC3686_NONCE_SIZE;
        memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);

        iv += CTR_RFC3686_IV_SIZE;
        *(__be32 *)iv = cpu_to_be32(1);

        /* Point to the new IV */
        rctx->rfc3686_info = req->iv;
        req->iv = rctx->rfc3686_iv;

        return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
        return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
        return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

        ctx->complete = ccp_aes_rfc3686_complete;
        ctx->u.aes.key_len = 0;

        crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

        return 0;
}

static const struct skcipher_alg ccp_aes_defaults = {
        .setkey = ccp_aes_setkey,
        .encrypt = ccp_aes_encrypt,
        .decrypt = ccp_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .init = ccp_aes_init_tfm,

        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
        .base.cra_priority = CCP_CRA_PRIORITY,
        .base.cra_module = THIS_MODULE,
};
static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
        .setkey = ccp_aes_rfc3686_setkey,
        .encrypt = ccp_aes_rfc3686_encrypt,
        .decrypt = ccp_aes_rfc3686_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
        .init = ccp_aes_rfc3686_init_tfm,

        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
        .base.cra_priority = CCP_CRA_PRIORITY,
        .base.cra_module = THIS_MODULE,
};

struct ccp_aes_def {
        enum ccp_aes_mode mode;
        unsigned int version;
        const char *name;
        const char *driver_name;
        unsigned int blocksize;
        unsigned int ivsize;
        const struct skcipher_alg *alg_defaults;
};

static struct ccp_aes_def aes_algs[] = {
        {
                .mode = CCP_AES_MODE_ECB,
                .version = CCP_VERSION(3, 0),
                .name = "ecb(aes)",
                .driver_name = "ecb-aes-ccp",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = 0,
                .alg_defaults = &ccp_aes_defaults,
        },
        {
                .mode = CCP_AES_MODE_CBC,
                .version = CCP_VERSION(3, 0),
                .name = "cbc(aes)",
                .driver_name = "cbc-aes-ccp",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .alg_defaults = &ccp_aes_defaults,
        },
        {
                .mode = CCP_AES_MODE_CFB,
                .version = CCP_VERSION(3, 0),
                .name = "cfb(aes)",
                .driver_name = "cfb-aes-ccp",
                .blocksize = 1,
                .ivsize = AES_BLOCK_SIZE,
                .alg_defaults = &ccp_aes_defaults,
        },
        {
                .mode = CCP_AES_MODE_OFB,
                .version = CCP_VERSION(3, 0),
                .name = "ofb(aes)",
                .driver_name = "ofb-aes-ccp",
                .blocksize = 1,
                .ivsize = AES_BLOCK_SIZE,
                .alg_defaults = &ccp_aes_defaults,
        },
        {
                .mode = CCP_AES_MODE_CTR,
                .version = CCP_VERSION(3, 0),
                .name = "ctr(aes)",
                .driver_name = "ctr-aes-ccp",
                .blocksize = 1,
                .ivsize = AES_BLOCK_SIZE,
                .alg_defaults = &ccp_aes_defaults,
        },
        {
                .mode = CCP_AES_MODE_CTR,
                .version = CCP_VERSION(3, 0),
                .name = "rfc3686(ctr(aes))",
                .driver_name = "rfc3686-ctr-aes-ccp",
                .blocksize = 1,
                .ivsize = CTR_RFC3686_IV_SIZE,
                .alg_defaults = &ccp_aes_rfc3686_defaults,
        },
};

static int ccp_register_aes_alg(struct list_head *head,
                                const struct ccp_aes_def *def)
{
        struct ccp_crypto_skcipher_alg *ccp_alg;
        struct skcipher_alg *alg;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        ccp_alg->mode = def->mode;

        /* Copy the defaults and override as necessary */
        alg = &ccp_alg->alg;
        *alg = *def->alg_defaults;
        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->driver_name);
        alg->base.cra_blocksize = def->blocksize;
        alg->ivsize = def->ivsize;

        ret = crypto_register_skcipher(alg);
        if (ret) {
                pr_err("%s skcipher algorithm registration error (%d)\n",
                       alg->base.cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}
int ccp_register_aes_algs(struct list_head *head)
{
        int i, ret;
        unsigned int ccpversion = ccp_version();

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                if (aes_algs[i].version > ccpversion)
                        continue;
                ret = ccp_register_aes_alg(head, &aes_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
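
/*
 * Illustrative sketch only, not part of the driver: a minimal way a kernel
 * caller might exercise the "cbc(aes)" skcipher registered above (served by
 * "cbc-aes-ccp" when the CCP driver wins the priority selection).  The
 * function name, the all-zero key/IV and the single-block buffer are
 * assumptions made for this example; the calls themselves are the standard
 * asynchronous request/wait pattern of the kernel crypto API.  Assumes
 * <crypto/skcipher.h> and <linux/slab.h> are also included.
 */
static int __maybe_unused ccp_aes_cbc_example(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };        /* example key, all zeroes */
        u8 iv[AES_BLOCK_SIZE] = { 0 };          /* example IV, all zeroes */
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 *buf;
        int ret;

        /* Any provider of cbc(aes) may be selected, including the CCP */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Heap buffer: the data is DMA-mapped through the scatterlist */
        buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (ret)
                goto out_free_buf;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_buf;
        }

        sg_init_one(&sg, buf, AES_BLOCK_SIZE);
        skcipher_request_set_callback(req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

        /* The CCP completes asynchronously; wait for the result */
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_buf:
        kfree(buf);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}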