// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behavior on imx6sl and imx6ull;
 * these are flipped for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can only ever be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware, so store the key in the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align the driver with the hardware behavior when generating
	 * null hashes.
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");