/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

        if (ret)
                return ret;

        /* ECB has no IV; for all other modes hand the IV back to the caller */
        if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
                memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

        return 0;
}

static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                          unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
        struct ccp_crypto_ablkcipher_alg *alg =
                ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));

        switch (key_len) {
        case AES_KEYSIZE_128:
                ctx->u.aes.type = CCP_AES_TYPE_128;
                break;
        case AES_KEYSIZE_192:
                ctx->u.aes.type = CCP_AES_TYPE_192;
                break;
        case AES_KEYSIZE_256:
                ctx->u.aes.type = CCP_AES_TYPE_256;
                break;
        default:
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->u.aes.mode = alg->mode;
        ctx->u.aes.key_len = key_len;

        memcpy(ctx->u.aes.key, key, key_len);
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return 0;
}
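/*
 * Illustrative sketch, not part of the original driver: these handlers are
 * not called directly; a kernel user reaches ccp_aes_setkey() above through
 * the generic crypto API. All names in this example are made up:
 *
 *	struct crypto_ablkcipher *tfm;
 *	u8 key[AES_KEYSIZE_128];
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		get_random_bytes(key, sizeof(key));
 *		crypto_ablkcipher_setkey(tfm, key, sizeof(key));
 *		...
 *		crypto_free_ablkcipher(tfm);
 *	}
 */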
static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        struct scatterlist *iv_sg = NULL;
        unsigned int iv_len = 0;
        int ret;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        /* ECB, CBC and CFB operate on full blocks only */
        if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
             (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
             (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
            (req->nbytes & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
                if (!req->info)
                        return -EINVAL;

                memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
                iv_sg = &rctx->iv_sg;
                iv_len = AES_BLOCK_SIZE;
                sg_init_one(iv_sg, rctx->iv, iv_len);
        }

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action =
                (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
        rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.aes.iv = iv_sg;
        rctx->cmd.u.aes.iv_len = iv_len;
        rctx->cmd.u.aes.src = req->src;
        rctx->cmd.u.aes.src_len = req->nbytes;
        rctx->cmd.u.aes.dst = req->dst;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;
}

static int ccp_aes_encrypt(struct ablkcipher_request *req)
{
        return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct ablkcipher_request *req)
{
        return ccp_aes_crypt(req, false);
}

static int ccp_aes_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->complete = ccp_aes_complete;
        ctx->u.aes.key_len = 0;

        tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

        return 0;
}

static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
                                    int ret)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

        /* Restore the original pointer */
        req->info = rctx->rfc3686_info;

        return ccp_aes_complete(async_req, ret);
}

static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                                  unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;

        /* The nonce occupies the last four bytes of the key material */
        key_len -= CTR_RFC3686_NONCE_SIZE;
        memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

        return ccp_aes_setkey(tfm, key, key_len);
}

static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        u8 *iv;

        /* Initialize the CTR block */
        iv = rctx->rfc3686_iv;
        memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

        iv += CTR_RFC3686_NONCE_SIZE;
        memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);

        iv += CTR_RFC3686_IV_SIZE;
        *(__be32 *)iv = cpu_to_be32(1);

        /* Point to the new IV */
        rctx->rfc3686_info = req->info;
        req->info = rctx->rfc3686_iv;

        return ccp_aes_crypt(req, encrypt);
}
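/*
 * For reference, an added sketch (not in the original source) of the 16-byte
 * counter block assembled by ccp_aes_rfc3686_crypt() above, following
 * RFC 3686, section 4:
 *
 *	bytes  0..3	nonce (captured at setkey time)
 *	bytes  4..11	per-request IV taken from req->info
 *	bytes 12..15	big-endian block counter, starting at 1
 */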
static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
{
        return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
{
        return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->complete = ccp_aes_rfc3686_complete;
        ctx->u.aes.key_len = 0;

        tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

        return 0;
}

static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg ccp_aes_defaults = {
        .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
                          CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_ctxsize    = sizeof(struct ccp_ctx),
        .cra_priority   = CCP_CRA_PRIORITY,
        .cra_type       = &crypto_ablkcipher_type,
        .cra_init       = ccp_aes_cra_init,
        .cra_exit       = ccp_aes_cra_exit,
        .cra_module     = THIS_MODULE,
        .cra_ablkcipher = {
                .setkey         = ccp_aes_setkey,
                .encrypt        = ccp_aes_encrypt,
                .decrypt        = ccp_aes_decrypt,
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
        },
};

static struct crypto_alg ccp_aes_rfc3686_defaults = {
        .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
                          CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize  = CTR_RFC3686_BLOCK_SIZE,
        .cra_ctxsize    = sizeof(struct ccp_ctx),
        .cra_priority   = CCP_CRA_PRIORITY,
        .cra_type       = &crypto_ablkcipher_type,
        .cra_init       = ccp_aes_rfc3686_cra_init,
        .cra_exit       = ccp_aes_rfc3686_cra_exit,
        .cra_module     = THIS_MODULE,
        .cra_ablkcipher = {
                .setkey         = ccp_aes_rfc3686_setkey,
                .encrypt        = ccp_aes_rfc3686_encrypt,
                .decrypt        = ccp_aes_rfc3686_decrypt,
                .min_keysize    = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
        },
};

struct ccp_aes_def {
        enum ccp_aes_mode mode;
        const char *name;
        const char *driver_name;
        unsigned int blocksize;
        unsigned int ivsize;
        struct crypto_alg *alg_defaults;
};

static struct ccp_aes_def aes_algs[] = {
        {
                .mode           = CCP_AES_MODE_ECB,
                .name           = "ecb(aes)",
                .driver_name    = "ecb-aes-ccp",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = 0,
                .alg_defaults   = &ccp_aes_defaults,
        },
        {
                .mode           = CCP_AES_MODE_CBC,
                .name           = "cbc(aes)",
                .driver_name    = "cbc-aes-ccp",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .alg_defaults   = &ccp_aes_defaults,
        },
        {
                .mode           = CCP_AES_MODE_CFB,
                .name           = "cfb(aes)",
                .driver_name    = "cfb-aes-ccp",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .alg_defaults   = &ccp_aes_defaults,
        },
        {
                .mode           = CCP_AES_MODE_OFB,
                .name           = "ofb(aes)",
                .driver_name    = "ofb-aes-ccp",
                .blocksize      = 1,
                .ivsize         = AES_BLOCK_SIZE,
                .alg_defaults   = &ccp_aes_defaults,
        },
        {
                .mode           = CCP_AES_MODE_CTR,
                .name           = "ctr(aes)",
                .driver_name    = "ctr-aes-ccp",
                .blocksize      = 1,
                .ivsize         = AES_BLOCK_SIZE,
                .alg_defaults   = &ccp_aes_defaults,
        },
        {
                .mode           = CCP_AES_MODE_CTR,
                .name           = "rfc3686(ctr(aes))",
                .driver_name    = "rfc3686-ctr-aes-ccp",
                .blocksize      = 1,
                .ivsize         = CTR_RFC3686_IV_SIZE,
                .alg_defaults   = &ccp_aes_rfc3686_defaults,
        },
};

static int ccp_register_aes_alg(struct list_head *head,
                                const struct ccp_aes_def *def)
{
        struct ccp_crypto_ablkcipher_alg *ccp_alg;
        struct crypto_alg *alg;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        ccp_alg->mode = def->mode;

        /* Copy the defaults and override as necessary */
        alg = &ccp_alg->alg;
        *alg = *def->alg_defaults;
        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->driver_name);
        alg->cra_blocksize = def->blocksize;
        alg->cra_ablkcipher.ivsize = def->ivsize;

        ret = crypto_register_alg(alg);
        if (ret) {
                pr_err("%s ablkcipher algorithm registration error (%d)\n",
                       alg->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}
int ccp_register_aes_algs(struct list_head *head)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                ret = ccp_register_aes_alg(head, &aes_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
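/*
 * Usage sketch, hypothetical and for illustration only: the CCP crypto layer
 * is expected to drive registration along these lines; the list head and
 * function name below are made up:
 *
 *	static LIST_HEAD(example_cipher_algs);
 *
 *	static int example_register_algs(void)
 *	{
 *		int ret;
 *
 *		ret = ccp_register_aes_algs(&example_cipher_algs);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */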