1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Freescale i.MX23/i.MX28 Data Co-Processor driver 4 * 5 * Copyright (C) 2013 Marek Vasut <marex@denx.de> 6 */ 7 8 #include <linux/dma-mapping.h> 9 #include <linux/interrupt.h> 10 #include <linux/io.h> 11 #include <linux/kernel.h> 12 #include <linux/kthread.h> 13 #include <linux/module.h> 14 #include <linux/of.h> 15 #include <linux/platform_device.h> 16 #include <linux/stmp_device.h> 17 #include <linux/clk.h> 18 #include <soc/fsl/dcp.h> 19 20 #include <crypto/aes.h> 21 #include <crypto/sha1.h> 22 #include <crypto/sha2.h> 23 #include <crypto/internal/hash.h> 24 #include <crypto/internal/skcipher.h> 25 #include <crypto/scatterwalk.h> 26 27 #define DCP_MAX_CHANS 4 28 #define DCP_BUF_SZ PAGE_SIZE 29 #define DCP_SHA_PAY_SZ 64 30 31 #define DCP_ALIGNMENT 64 32 33 /* 34 * Null hashes to align with hw behavior on imx6sl and ull 35 * these are flipped for consistency with hw output 36 */ 37 static const uint8_t sha1_null_hash[] = 38 "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf" 39 "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda"; 40 41 static const uint8_t sha256_null_hash[] = 42 "\x55\xb8\x52\x78\x1b\x99\x95\xa4" 43 "\x4c\x93\x9b\x64\xe4\x41\xae\x27" 44 "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a" 45 "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3"; 46 47 /* DCP DMA descriptor. */ 48 struct dcp_dma_desc { 49 uint32_t next_cmd_addr; 50 uint32_t control0; 51 uint32_t control1; 52 uint32_t source; 53 uint32_t destination; 54 uint32_t size; 55 uint32_t payload; 56 uint32_t status; 57 }; 58 59 /* Coherent aligned block for bounce buffering. */ 60 struct dcp_coherent_block { 61 uint8_t aes_in_buf[DCP_BUF_SZ]; 62 uint8_t aes_out_buf[DCP_BUF_SZ]; 63 uint8_t sha_in_buf[DCP_BUF_SZ]; 64 uint8_t sha_out_buf[DCP_SHA_PAY_SZ]; 65 66 uint8_t aes_key[2 * AES_KEYSIZE_128]; 67 68 struct dcp_dma_desc desc[DCP_MAX_CHANS]; 69 }; 70 71 struct dcp { 72 struct device *dev; 73 void __iomem *base; 74 75 uint32_t caps; 76 77 struct dcp_coherent_block *coh; 78 79 struct completion completion[DCP_MAX_CHANS]; 80 spinlock_t lock[DCP_MAX_CHANS]; 81 struct task_struct *thread[DCP_MAX_CHANS]; 82 struct crypto_queue queue[DCP_MAX_CHANS]; 83 struct clk *dcp_clk; 84 }; 85 86 enum dcp_chan { 87 DCP_CHAN_HASH_SHA = 0, 88 DCP_CHAN_CRYPTO = 2, 89 }; 90 91 struct dcp_async_ctx { 92 /* Common context */ 93 enum dcp_chan chan; 94 uint32_t fill; 95 96 /* SHA Hash-specific context */ 97 struct mutex mutex; 98 uint32_t alg; 99 unsigned int hot:1; 100 101 /* Crypto-specific context */ 102 struct crypto_skcipher *fallback; 103 unsigned int key_len; 104 uint8_t key[AES_KEYSIZE_128]; 105 bool key_referenced; 106 }; 107 108 struct dcp_aes_req_ctx { 109 unsigned int enc:1; 110 unsigned int ecb:1; 111 struct skcipher_request fallback_req; // keep at the end 112 }; 113 114 struct dcp_sha_req_ctx { 115 unsigned int init:1; 116 unsigned int fini:1; 117 }; 118 119 struct dcp_export_state { 120 struct dcp_sha_req_ctx req_ctx; 121 struct dcp_async_ctx async_ctx; 122 }; 123 124 /* 125 * There can even be only one instance of the MXS DCP due to the 126 * design of Linux Crypto API. 127 */ 128 static struct dcp *global_sdcp; 129 130 /* DCP register layout. */ 131 #define MXS_DCP_CTRL 0x00 132 #define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23) 133 #define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22) 134 135 #define MXS_DCP_STAT 0x10 136 #define MXS_DCP_STAT_CLR 0x18 137 #define MXS_DCP_STAT_IRQ_MASK 0xf 138 139 #define MXS_DCP_CHANNELCTRL 0x20 140 #define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff 141 142 #define MXS_DCP_CAPABILITY1 0x40 143 #define MXS_DCP_CAPABILITY1_SHA256 (4 << 16) 144 #define MXS_DCP_CAPABILITY1_SHA1 (1 << 16) 145 #define MXS_DCP_CAPABILITY1_AES128 (1 << 0) 146 147 #define MXS_DCP_CONTEXT 0x50 148 149 #define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40)) 150 151 #define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40)) 152 153 #define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40)) 154 #define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40)) 155 156 /* DMA descriptor bits. */ 157 #define MXS_DCP_CONTROL0_HASH_TERM (1 << 13) 158 #define MXS_DCP_CONTROL0_HASH_INIT (1 << 12) 159 #define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11) 160 #define MXS_DCP_CONTROL0_OTP_KEY (1 << 10) 161 #define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8) 162 #define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9) 163 #define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6) 164 #define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5) 165 #define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1) 166 #define MXS_DCP_CONTROL0_INTERRUPT (1 << 0) 167 168 #define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16) 169 #define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16) 170 #define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4) 171 #define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4) 172 #define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0) 173 174 #define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT 8 175 176 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) 177 { 178 int dma_err; 179 struct dcp *sdcp = global_sdcp; 180 const int chan = actx->chan; 181 uint32_t stat; 182 unsigned long ret; 183 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; 184 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), 185 DMA_TO_DEVICE); 186 187 dma_err = dma_mapping_error(sdcp->dev, desc_phys); 188 if (dma_err) 189 return dma_err; 190 191 reinit_completion(&sdcp->completion[chan]); 192 193 /* Clear status register. */ 194 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan)); 195 196 /* Load the DMA descriptor. */ 197 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan)); 198 199 /* Increment the semaphore to start the DMA transfer. */ 200 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan)); 201 202 ret = wait_for_completion_timeout(&sdcp->completion[chan], 203 msecs_to_jiffies(1000)); 204 if (!ret) { 205 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n", 206 chan, readl(sdcp->base + MXS_DCP_STAT)); 207 return -ETIMEDOUT; 208 } 209 210 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan)); 211 if (stat & 0xff) { 212 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n", 213 chan, stat); 214 return -EINVAL; 215 } 216 217 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE); 218 219 return 0; 220 } 221 222 /* 223 * Encryption (AES128) 224 */ 225 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, 226 struct skcipher_request *req, int init) 227 { 228 dma_addr_t key_phys = 0; 229 dma_addr_t src_phys, dst_phys; 230 struct dcp *sdcp = global_sdcp; 231 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; 232 struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); 233 bool key_referenced = actx->key_referenced; 234 int ret; 235 236 if (!key_referenced) { 237 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, 238 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); 239 ret = dma_mapping_error(sdcp->dev, key_phys); 240 if (ret) 241 return ret; 242 } 243 244 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, 245 DCP_BUF_SZ, DMA_TO_DEVICE); 246 ret = dma_mapping_error(sdcp->dev, src_phys); 247 if (ret) 248 goto err_src; 249 250 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, 251 DCP_BUF_SZ, DMA_FROM_DEVICE); 252 ret = dma_mapping_error(sdcp->dev, dst_phys); 253 if (ret) 254 goto err_dst; 255 256 if (actx->fill % AES_BLOCK_SIZE) { 257 dev_err(sdcp->dev, "Invalid block size!\n"); 258 ret = -EINVAL; 259 goto aes_done_run; 260 } 261 262 /* Fill in the DMA descriptor. */ 263 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | 264 MXS_DCP_CONTROL0_INTERRUPT | 265 MXS_DCP_CONTROL0_ENABLE_CIPHER; 266 267 if (key_referenced) 268 /* Set OTP key bit to select the key via KEY_SELECT. */ 269 desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; 270 else 271 /* Payload contains the key. */ 272 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; 273 274 if (rctx->enc) 275 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; 276 if (init) 277 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; 278 279 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; 280 281 if (rctx->ecb) 282 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; 283 else 284 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; 285 286 if (key_referenced) 287 desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT; 288 289 desc->next_cmd_addr = 0; 290 desc->source = src_phys; 291 desc->destination = dst_phys; 292 desc->size = actx->fill; 293 desc->payload = key_phys; 294 desc->status = 0; 295 296 ret = mxs_dcp_start_dma(actx); 297 298 aes_done_run: 299 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); 300 err_dst: 301 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); 302 err_src: 303 if (!key_referenced) 304 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, 305 DMA_TO_DEVICE); 306 return ret; 307 } 308 309 static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) 310 { 311 struct dcp *sdcp = global_sdcp; 312 313 struct skcipher_request *req = skcipher_request_cast(arq); 314 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); 315 struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); 316 317 struct scatterlist *dst = req->dst; 318 struct scatterlist *src = req->src; 319 int dst_nents = sg_nents(dst); 320 321 const int out_off = DCP_BUF_SZ; 322 uint8_t *in_buf = sdcp->coh->aes_in_buf; 323 uint8_t *out_buf = sdcp->coh->aes_out_buf; 324 325 uint32_t dst_off = 0; 326 uint8_t *src_buf = NULL; 327 uint32_t last_out_len = 0; 328 329 uint8_t *key = sdcp->coh->aes_key; 330 331 int ret = 0; 332 unsigned int i, len, clen, tlen = 0; 333 int init = 0; 334 bool limit_hit = false; 335 336 actx->fill = 0; 337 338 /* Copy the key from the temporary location. */ 339 memcpy(key, actx->key, actx->key_len); 340 341 if (!rctx->ecb) { 342 /* Copy the CBC IV just past the key. */ 343 memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128); 344 /* CBC needs the INIT set. */ 345 init = 1; 346 } else { 347 memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); 348 } 349 350 for_each_sg(req->src, src, sg_nents(req->src), i) { 351 src_buf = sg_virt(src); 352 len = sg_dma_len(src); 353 tlen += len; 354 limit_hit = tlen > req->cryptlen; 355 356 if (limit_hit) 357 len = req->cryptlen - (tlen - len); 358 359 do { 360 if (actx->fill + len > out_off) 361 clen = out_off - actx->fill; 362 else 363 clen = len; 364 365 memcpy(in_buf + actx->fill, src_buf, clen); 366 len -= clen; 367 src_buf += clen; 368 actx->fill += clen; 369 370 /* 371 * If we filled the buffer or this is the last SG, 372 * submit the buffer. 373 */ 374 if (actx->fill == out_off || sg_is_last(src) || 375 limit_hit) { 376 ret = mxs_dcp_run_aes(actx, req, init); 377 if (ret) 378 return ret; 379 init = 0; 380 381 sg_pcopy_from_buffer(dst, dst_nents, out_buf, 382 actx->fill, dst_off); 383 dst_off += actx->fill; 384 last_out_len = actx->fill; 385 actx->fill = 0; 386 } 387 } while (len); 388 389 if (limit_hit) 390 break; 391 } 392 393 /* Copy the IV for CBC for chaining */ 394 if (!rctx->ecb) { 395 if (rctx->enc) 396 memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE), 397 AES_BLOCK_SIZE); 398 else 399 memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE), 400 AES_BLOCK_SIZE); 401 } 402 403 return ret; 404 } 405 406 static int dcp_chan_thread_aes(void *data) 407 { 408 struct dcp *sdcp = global_sdcp; 409 const int chan = DCP_CHAN_CRYPTO; 410 411 struct crypto_async_request *backlog; 412 struct crypto_async_request *arq; 413 414 int ret; 415 416 while (!kthread_should_stop()) { 417 set_current_state(TASK_INTERRUPTIBLE); 418 419 spin_lock(&sdcp->lock[chan]); 420 backlog = crypto_get_backlog(&sdcp->queue[chan]); 421 arq = crypto_dequeue_request(&sdcp->queue[chan]); 422 spin_unlock(&sdcp->lock[chan]); 423 424 if (!backlog && !arq) { 425 schedule(); 426 continue; 427 } 428 429 set_current_state(TASK_RUNNING); 430 431 if (backlog) 432 crypto_request_complete(backlog, -EINPROGRESS); 433 434 if (arq) { 435 ret = mxs_dcp_aes_block_crypt(arq); 436 crypto_request_complete(arq, ret); 437 } 438 } 439 440 return 0; 441 } 442 443 static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc) 444 { 445 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 446 struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); 447 struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm); 448 int ret; 449 450 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); 451 skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, 452 req->base.complete, req->base.data); 453 skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, 454 req->cryptlen, req->iv); 455 456 if (enc) 457 ret = crypto_skcipher_encrypt(&rctx->fallback_req); 458 else 459 ret = crypto_skcipher_decrypt(&rctx->fallback_req); 460 461 return ret; 462 } 463 464 static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb) 465 { 466 struct dcp *sdcp = global_sdcp; 467 struct crypto_async_request *arq = &req->base; 468 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); 469 struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); 470 int ret; 471 472 if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced)) 473 return mxs_dcp_block_fallback(req, enc); 474 475 rctx->enc = enc; 476 rctx->ecb = ecb; 477 actx->chan = DCP_CHAN_CRYPTO; 478 479 spin_lock(&sdcp->lock[actx->chan]); 480 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); 481 spin_unlock(&sdcp->lock[actx->chan]); 482 483 wake_up_process(sdcp->thread[actx->chan]); 484 485 return ret; 486 } 487 488 static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req) 489 { 490 return mxs_dcp_aes_enqueue(req, 0, 1); 491 } 492 493 static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req) 494 { 495 return mxs_dcp_aes_enqueue(req, 1, 1); 496 } 497 498 static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req) 499 { 500 return mxs_dcp_aes_enqueue(req, 0, 0); 501 } 502 503 static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req) 504 { 505 return mxs_dcp_aes_enqueue(req, 1, 0); 506 } 507 508 static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 509 unsigned int len) 510 { 511 struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); 512 513 /* 514 * AES 128 is supposed by the hardware, store key into temporary 515 * buffer and exit. We must use the temporary buffer here, since 516 * there can still be an operation in progress. 517 */ 518 actx->key_len = len; 519 actx->key_referenced = false; 520 if (len == AES_KEYSIZE_128) { 521 memcpy(actx->key, key, len); 522 return 0; 523 } 524 525 /* 526 * If the requested AES key size is not supported by the hardware, 527 * but is supported by in-kernel software implementation, we use 528 * software fallback. 529 */ 530 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); 531 crypto_skcipher_set_flags(actx->fallback, 532 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 533 return crypto_skcipher_setkey(actx->fallback, key, len); 534 } 535 536 static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key, 537 unsigned int len) 538 { 539 struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); 540 541 if (len != DCP_PAES_KEYSIZE) 542 return -EINVAL; 543 544 switch (key[0]) { 545 case DCP_PAES_KEY_SLOT0: 546 case DCP_PAES_KEY_SLOT1: 547 case DCP_PAES_KEY_SLOT2: 548 case DCP_PAES_KEY_SLOT3: 549 case DCP_PAES_KEY_UNIQUE: 550 case DCP_PAES_KEY_OTP: 551 memcpy(actx->key, key, len); 552 actx->key_len = len; 553 actx->key_referenced = true; 554 break; 555 default: 556 return -EINVAL; 557 } 558 559 return 0; 560 } 561 562 static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) 563 { 564 const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); 565 struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); 566 struct crypto_skcipher *blk; 567 568 blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); 569 if (IS_ERR(blk)) 570 return PTR_ERR(blk); 571 572 actx->fallback = blk; 573 crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) + 574 crypto_skcipher_reqsize(blk)); 575 return 0; 576 } 577 578 static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm) 579 { 580 struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); 581 582 crypto_free_skcipher(actx->fallback); 583 } 584 585 static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm) 586 { 587 crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx)); 588 589 return 0; 590 } 591 592 /* 593 * Hashing (SHA1/SHA256) 594 */ 595 static int mxs_dcp_run_sha(struct ahash_request *req) 596 { 597 struct dcp *sdcp = global_sdcp; 598 int ret; 599 600 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 601 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 602 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); 603 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; 604 605 dma_addr_t digest_phys = 0; 606 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, 607 DCP_BUF_SZ, DMA_TO_DEVICE); 608 609 ret = dma_mapping_error(sdcp->dev, buf_phys); 610 if (ret) 611 return ret; 612 613 /* Fill in the DMA descriptor. */ 614 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | 615 MXS_DCP_CONTROL0_INTERRUPT | 616 MXS_DCP_CONTROL0_ENABLE_HASH; 617 if (rctx->init) 618 desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT; 619 620 desc->control1 = actx->alg; 621 desc->next_cmd_addr = 0; 622 desc->source = buf_phys; 623 desc->destination = 0; 624 desc->size = actx->fill; 625 desc->payload = 0; 626 desc->status = 0; 627 628 /* 629 * Align driver with hw behavior when generating null hashes 630 */ 631 if (rctx->init && rctx->fini && desc->size == 0) { 632 struct hash_alg_common *halg = crypto_hash_alg_common(tfm); 633 const uint8_t *sha_buf = 634 (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ? 635 sha1_null_hash : sha256_null_hash; 636 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize); 637 ret = 0; 638 goto done_run; 639 } 640 641 /* Set HASH_TERM bit for last transfer block. */ 642 if (rctx->fini) { 643 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, 644 DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); 645 ret = dma_mapping_error(sdcp->dev, digest_phys); 646 if (ret) 647 goto done_run; 648 649 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; 650 desc->payload = digest_phys; 651 } 652 653 ret = mxs_dcp_start_dma(actx); 654 655 if (rctx->fini) 656 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, 657 DMA_FROM_DEVICE); 658 659 done_run: 660 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); 661 662 return ret; 663 } 664 665 static int dcp_sha_req_to_buf(struct crypto_async_request *arq) 666 { 667 struct dcp *sdcp = global_sdcp; 668 669 struct ahash_request *req = ahash_request_cast(arq); 670 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 671 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 672 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); 673 struct hash_alg_common *halg = crypto_hash_alg_common(tfm); 674 675 uint8_t *in_buf = sdcp->coh->sha_in_buf; 676 uint8_t *out_buf = sdcp->coh->sha_out_buf; 677 678 struct scatterlist *src; 679 680 unsigned int i, len, clen, oft = 0; 681 int ret; 682 683 int fin = rctx->fini; 684 if (fin) 685 rctx->fini = 0; 686 687 src = req->src; 688 len = req->nbytes; 689 690 while (len) { 691 if (actx->fill + len > DCP_BUF_SZ) 692 clen = DCP_BUF_SZ - actx->fill; 693 else 694 clen = len; 695 696 scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, 697 0); 698 699 len -= clen; 700 oft += clen; 701 actx->fill += clen; 702 703 /* 704 * If we filled the buffer and still have some 705 * more data, submit the buffer. 706 */ 707 if (len && actx->fill == DCP_BUF_SZ) { 708 ret = mxs_dcp_run_sha(req); 709 if (ret) 710 return ret; 711 actx->fill = 0; 712 rctx->init = 0; 713 } 714 } 715 716 if (fin) { 717 rctx->fini = 1; 718 719 /* Submit whatever is left. */ 720 if (!req->result) 721 return -EINVAL; 722 723 ret = mxs_dcp_run_sha(req); 724 if (ret) 725 return ret; 726 727 actx->fill = 0; 728 729 /* For some reason the result is flipped */ 730 for (i = 0; i < halg->digestsize; i++) 731 req->result[i] = out_buf[halg->digestsize - i - 1]; 732 } 733 734 return 0; 735 } 736 737 static int dcp_chan_thread_sha(void *data) 738 { 739 struct dcp *sdcp = global_sdcp; 740 const int chan = DCP_CHAN_HASH_SHA; 741 742 struct crypto_async_request *backlog; 743 struct crypto_async_request *arq; 744 int ret; 745 746 while (!kthread_should_stop()) { 747 set_current_state(TASK_INTERRUPTIBLE); 748 749 spin_lock(&sdcp->lock[chan]); 750 backlog = crypto_get_backlog(&sdcp->queue[chan]); 751 arq = crypto_dequeue_request(&sdcp->queue[chan]); 752 spin_unlock(&sdcp->lock[chan]); 753 754 if (!backlog && !arq) { 755 schedule(); 756 continue; 757 } 758 759 set_current_state(TASK_RUNNING); 760 761 if (backlog) 762 crypto_request_complete(backlog, -EINPROGRESS); 763 764 if (arq) { 765 ret = dcp_sha_req_to_buf(arq); 766 crypto_request_complete(arq, ret); 767 } 768 } 769 770 return 0; 771 } 772 773 static int dcp_sha_init(struct ahash_request *req) 774 { 775 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 776 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 777 778 struct hash_alg_common *halg = crypto_hash_alg_common(tfm); 779 780 /* 781 * Start hashing session. The code below only inits the 782 * hashing session context, nothing more. 783 */ 784 memset(actx, 0, sizeof(*actx)); 785 786 if (strcmp(halg->base.cra_name, "sha1") == 0) 787 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1; 788 else 789 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256; 790 791 actx->fill = 0; 792 actx->hot = 0; 793 actx->chan = DCP_CHAN_HASH_SHA; 794 795 mutex_init(&actx->mutex); 796 797 return 0; 798 } 799 800 static int dcp_sha_update_fx(struct ahash_request *req, int fini) 801 { 802 struct dcp *sdcp = global_sdcp; 803 804 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); 805 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 806 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 807 808 int ret; 809 810 /* 811 * Ignore requests that have no data in them and are not 812 * the trailing requests in the stream of requests. 813 */ 814 if (!req->nbytes && !fini) 815 return 0; 816 817 mutex_lock(&actx->mutex); 818 819 rctx->fini = fini; 820 821 if (!actx->hot) { 822 actx->hot = 1; 823 rctx->init = 1; 824 } 825 826 spin_lock(&sdcp->lock[actx->chan]); 827 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); 828 spin_unlock(&sdcp->lock[actx->chan]); 829 830 wake_up_process(sdcp->thread[actx->chan]); 831 mutex_unlock(&actx->mutex); 832 833 return ret; 834 } 835 836 static int dcp_sha_update(struct ahash_request *req) 837 { 838 return dcp_sha_update_fx(req, 0); 839 } 840 841 static int dcp_sha_final(struct ahash_request *req) 842 { 843 ahash_request_set_crypt(req, NULL, req->result, 0); 844 req->nbytes = 0; 845 return dcp_sha_update_fx(req, 1); 846 } 847 848 static int dcp_sha_finup(struct ahash_request *req) 849 { 850 return dcp_sha_update_fx(req, 1); 851 } 852 853 static int dcp_sha_digest(struct ahash_request *req) 854 { 855 int ret; 856 857 ret = dcp_sha_init(req); 858 if (ret) 859 return ret; 860 861 return dcp_sha_finup(req); 862 } 863 864 static int dcp_sha_import(struct ahash_request *req, const void *in) 865 { 866 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); 867 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 868 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 869 const struct dcp_export_state *export = in; 870 871 memset(rctx, 0, sizeof(struct dcp_sha_req_ctx)); 872 memset(actx, 0, sizeof(struct dcp_async_ctx)); 873 memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx)); 874 memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx)); 875 876 return 0; 877 } 878 879 static int dcp_sha_export(struct ahash_request *req, void *out) 880 { 881 struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req); 882 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 883 struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm); 884 struct dcp_export_state *export = out; 885 886 memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx)); 887 memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx)); 888 889 return 0; 890 } 891 892 static int dcp_sha_cra_init(struct crypto_tfm *tfm) 893 { 894 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 895 sizeof(struct dcp_sha_req_ctx)); 896 return 0; 897 } 898 899 static void dcp_sha_cra_exit(struct crypto_tfm *tfm) 900 { 901 } 902 903 /* AES 128 ECB and AES 128 CBC */ 904 static struct skcipher_alg dcp_aes_algs[] = { 905 { 906 .base.cra_name = "ecb(aes)", 907 .base.cra_driver_name = "ecb-aes-dcp", 908 .base.cra_priority = 400, 909 .base.cra_alignmask = 15, 910 .base.cra_flags = CRYPTO_ALG_ASYNC | 911 CRYPTO_ALG_NEED_FALLBACK, 912 .base.cra_blocksize = AES_BLOCK_SIZE, 913 .base.cra_ctxsize = sizeof(struct dcp_async_ctx), 914 .base.cra_module = THIS_MODULE, 915 916 .min_keysize = AES_MIN_KEY_SIZE, 917 .max_keysize = AES_MAX_KEY_SIZE, 918 .setkey = mxs_dcp_aes_setkey, 919 .encrypt = mxs_dcp_aes_ecb_encrypt, 920 .decrypt = mxs_dcp_aes_ecb_decrypt, 921 .init = mxs_dcp_aes_fallback_init_tfm, 922 .exit = mxs_dcp_aes_fallback_exit_tfm, 923 }, { 924 .base.cra_name = "cbc(aes)", 925 .base.cra_driver_name = "cbc-aes-dcp", 926 .base.cra_priority = 400, 927 .base.cra_alignmask = 15, 928 .base.cra_flags = CRYPTO_ALG_ASYNC | 929 CRYPTO_ALG_NEED_FALLBACK, 930 .base.cra_blocksize = AES_BLOCK_SIZE, 931 .base.cra_ctxsize = sizeof(struct dcp_async_ctx), 932 .base.cra_module = THIS_MODULE, 933 934 .min_keysize = AES_MIN_KEY_SIZE, 935 .max_keysize = AES_MAX_KEY_SIZE, 936 .setkey = mxs_dcp_aes_setkey, 937 .encrypt = mxs_dcp_aes_cbc_encrypt, 938 .decrypt = mxs_dcp_aes_cbc_decrypt, 939 .ivsize = AES_BLOCK_SIZE, 940 .init = mxs_dcp_aes_fallback_init_tfm, 941 .exit = mxs_dcp_aes_fallback_exit_tfm, 942 }, { 943 .base.cra_name = "ecb(paes)", 944 .base.cra_driver_name = "ecb-paes-dcp", 945 .base.cra_priority = 401, 946 .base.cra_alignmask = 15, 947 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL, 948 .base.cra_blocksize = AES_BLOCK_SIZE, 949 .base.cra_ctxsize = sizeof(struct dcp_async_ctx), 950 .base.cra_module = THIS_MODULE, 951 952 .min_keysize = DCP_PAES_KEYSIZE, 953 .max_keysize = DCP_PAES_KEYSIZE, 954 .setkey = mxs_dcp_aes_setrefkey, 955 .encrypt = mxs_dcp_aes_ecb_encrypt, 956 .decrypt = mxs_dcp_aes_ecb_decrypt, 957 .init = mxs_dcp_paes_init_tfm, 958 }, { 959 .base.cra_name = "cbc(paes)", 960 .base.cra_driver_name = "cbc-paes-dcp", 961 .base.cra_priority = 401, 962 .base.cra_alignmask = 15, 963 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL, 964 .base.cra_blocksize = AES_BLOCK_SIZE, 965 .base.cra_ctxsize = sizeof(struct dcp_async_ctx), 966 .base.cra_module = THIS_MODULE, 967 968 .min_keysize = DCP_PAES_KEYSIZE, 969 .max_keysize = DCP_PAES_KEYSIZE, 970 .setkey = mxs_dcp_aes_setrefkey, 971 .encrypt = mxs_dcp_aes_cbc_encrypt, 972 .decrypt = mxs_dcp_aes_cbc_decrypt, 973 .ivsize = AES_BLOCK_SIZE, 974 .init = mxs_dcp_paes_init_tfm, 975 }, 976 }; 977 978 /* SHA1 */ 979 static struct ahash_alg dcp_sha1_alg = { 980 .init = dcp_sha_init, 981 .update = dcp_sha_update, 982 .final = dcp_sha_final, 983 .finup = dcp_sha_finup, 984 .digest = dcp_sha_digest, 985 .import = dcp_sha_import, 986 .export = dcp_sha_export, 987 .halg = { 988 .digestsize = SHA1_DIGEST_SIZE, 989 .statesize = sizeof(struct dcp_export_state), 990 .base = { 991 .cra_name = "sha1", 992 .cra_driver_name = "sha1-dcp", 993 .cra_priority = 400, 994 .cra_flags = CRYPTO_ALG_ASYNC, 995 .cra_blocksize = SHA1_BLOCK_SIZE, 996 .cra_ctxsize = sizeof(struct dcp_async_ctx), 997 .cra_module = THIS_MODULE, 998 .cra_init = dcp_sha_cra_init, 999 .cra_exit = dcp_sha_cra_exit, 1000 }, 1001 }, 1002 }; 1003 1004 /* SHA256 */ 1005 static struct ahash_alg dcp_sha256_alg = { 1006 .init = dcp_sha_init, 1007 .update = dcp_sha_update, 1008 .final = dcp_sha_final, 1009 .finup = dcp_sha_finup, 1010 .digest = dcp_sha_digest, 1011 .import = dcp_sha_import, 1012 .export = dcp_sha_export, 1013 .halg = { 1014 .digestsize = SHA256_DIGEST_SIZE, 1015 .statesize = sizeof(struct dcp_export_state), 1016 .base = { 1017 .cra_name = "sha256", 1018 .cra_driver_name = "sha256-dcp", 1019 .cra_priority = 400, 1020 .cra_flags = CRYPTO_ALG_ASYNC, 1021 .cra_blocksize = SHA256_BLOCK_SIZE, 1022 .cra_ctxsize = sizeof(struct dcp_async_ctx), 1023 .cra_module = THIS_MODULE, 1024 .cra_init = dcp_sha_cra_init, 1025 .cra_exit = dcp_sha_cra_exit, 1026 }, 1027 }, 1028 }; 1029 1030 static irqreturn_t mxs_dcp_irq(int irq, void *context) 1031 { 1032 struct dcp *sdcp = context; 1033 uint32_t stat; 1034 int i; 1035 1036 stat = readl(sdcp->base + MXS_DCP_STAT); 1037 stat &= MXS_DCP_STAT_IRQ_MASK; 1038 if (!stat) 1039 return IRQ_NONE; 1040 1041 /* Clear the interrupts. */ 1042 writel(stat, sdcp->base + MXS_DCP_STAT_CLR); 1043 1044 /* Complete the DMA requests that finished. */ 1045 for (i = 0; i < DCP_MAX_CHANS; i++) 1046 if (stat & (1 << i)) 1047 complete(&sdcp->completion[i]); 1048 1049 return IRQ_HANDLED; 1050 } 1051 1052 static int mxs_dcp_probe(struct platform_device *pdev) 1053 { 1054 struct device *dev = &pdev->dev; 1055 struct dcp *sdcp = NULL; 1056 int i, ret; 1057 int dcp_vmi_irq, dcp_irq; 1058 1059 if (global_sdcp) { 1060 dev_err(dev, "Only one DCP instance allowed!\n"); 1061 return -ENODEV; 1062 } 1063 1064 dcp_vmi_irq = platform_get_irq(pdev, 0); 1065 if (dcp_vmi_irq < 0) 1066 return dcp_vmi_irq; 1067 1068 dcp_irq = platform_get_irq(pdev, 1); 1069 if (dcp_irq < 0) 1070 return dcp_irq; 1071 1072 sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); 1073 if (!sdcp) 1074 return -ENOMEM; 1075 1076 sdcp->dev = dev; 1077 sdcp->base = devm_platform_ioremap_resource(pdev, 0); 1078 if (IS_ERR(sdcp->base)) 1079 return PTR_ERR(sdcp->base); 1080 1081 1082 ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0, 1083 "dcp-vmi-irq", sdcp); 1084 if (ret) { 1085 dev_err(dev, "Failed to claim DCP VMI IRQ!\n"); 1086 return ret; 1087 } 1088 1089 ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0, 1090 "dcp-irq", sdcp); 1091 if (ret) { 1092 dev_err(dev, "Failed to claim DCP IRQ!\n"); 1093 return ret; 1094 } 1095 1096 /* Allocate coherent helper block. */ 1097 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT, 1098 GFP_KERNEL); 1099 if (!sdcp->coh) 1100 return -ENOMEM; 1101 1102 /* Re-align the structure so it fits the DCP constraints. */ 1103 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); 1104 1105 /* DCP clock is optional, only used on some SOCs */ 1106 sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp"); 1107 if (IS_ERR(sdcp->dcp_clk)) 1108 return PTR_ERR(sdcp->dcp_clk); 1109 1110 /* Restart the DCP block. */ 1111 ret = stmp_reset_block(sdcp->base); 1112 if (ret) { 1113 dev_err(dev, "Failed reset\n"); 1114 return ret; 1115 } 1116 1117 /* Initialize control register. */ 1118 writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | 1119 MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf, 1120 sdcp->base + MXS_DCP_CTRL); 1121 1122 /* Enable all DCP DMA channels. */ 1123 writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK, 1124 sdcp->base + MXS_DCP_CHANNELCTRL); 1125 1126 /* 1127 * We do not enable context switching. Give the context buffer a 1128 * pointer to an illegal address so if context switching is 1129 * inadvertantly enabled, the DCP will return an error instead of 1130 * trashing good memory. The DCP DMA cannot access ROM, so any ROM 1131 * address will do. 1132 */ 1133 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT); 1134 for (i = 0; i < DCP_MAX_CHANS; i++) 1135 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i)); 1136 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR); 1137 1138 global_sdcp = sdcp; 1139 1140 platform_set_drvdata(pdev, sdcp); 1141 1142 for (i = 0; i < DCP_MAX_CHANS; i++) { 1143 spin_lock_init(&sdcp->lock[i]); 1144 init_completion(&sdcp->completion[i]); 1145 crypto_init_queue(&sdcp->queue[i], 50); 1146 } 1147 1148 /* Create the SHA and AES handler threads. */ 1149 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha, 1150 NULL, "mxs_dcp_chan/sha"); 1151 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { 1152 dev_err(dev, "Error starting SHA thread!\n"); 1153 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); 1154 return ret; 1155 } 1156 1157 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, 1158 NULL, "mxs_dcp_chan/aes"); 1159 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) { 1160 dev_err(dev, "Error starting SHA thread!\n"); 1161 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]); 1162 goto err_destroy_sha_thread; 1163 } 1164 1165 /* Register the various crypto algorithms. */ 1166 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1); 1167 1168 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) { 1169 ret = crypto_register_skciphers(dcp_aes_algs, 1170 ARRAY_SIZE(dcp_aes_algs)); 1171 if (ret) { 1172 /* Failed to register algorithm. */ 1173 dev_err(dev, "Failed to register AES crypto!\n"); 1174 goto err_destroy_aes_thread; 1175 } 1176 } 1177 1178 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) { 1179 ret = crypto_register_ahash(&dcp_sha1_alg); 1180 if (ret) { 1181 dev_err(dev, "Failed to register %s hash!\n", 1182 dcp_sha1_alg.halg.base.cra_name); 1183 goto err_unregister_aes; 1184 } 1185 } 1186 1187 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) { 1188 ret = crypto_register_ahash(&dcp_sha256_alg); 1189 if (ret) { 1190 dev_err(dev, "Failed to register %s hash!\n", 1191 dcp_sha256_alg.halg.base.cra_name); 1192 goto err_unregister_sha1; 1193 } 1194 } 1195 1196 return 0; 1197 1198 err_unregister_sha1: 1199 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) 1200 crypto_unregister_ahash(&dcp_sha1_alg); 1201 1202 err_unregister_aes: 1203 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) 1204 crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs)); 1205 1206 err_destroy_aes_thread: 1207 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); 1208 1209 err_destroy_sha_thread: 1210 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); 1211 1212 return ret; 1213 } 1214 1215 static void mxs_dcp_remove(struct platform_device *pdev) 1216 { 1217 struct dcp *sdcp = platform_get_drvdata(pdev); 1218 1219 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) 1220 crypto_unregister_ahash(&dcp_sha256_alg); 1221 1222 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) 1223 crypto_unregister_ahash(&dcp_sha1_alg); 1224 1225 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) 1226 crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs)); 1227 1228 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); 1229 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); 1230 1231 platform_set_drvdata(pdev, NULL); 1232 1233 global_sdcp = NULL; 1234 } 1235 1236 static const struct of_device_id mxs_dcp_dt_ids[] = { 1237 { .compatible = "fsl,imx23-dcp", .data = NULL, }, 1238 { .compatible = "fsl,imx28-dcp", .data = NULL, }, 1239 { /* sentinel */ } 1240 }; 1241 1242 MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids); 1243 1244 static struct platform_driver mxs_dcp_driver = { 1245 .probe = mxs_dcp_probe, 1246 .remove_new = mxs_dcp_remove, 1247 .driver = { 1248 .name = "mxs-dcp", 1249 .of_match_table = mxs_dcp_dt_ids, 1250 }, 1251 }; 1252 1253 module_platform_driver(mxs_dcp_driver); 1254 1255 MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); 1256 MODULE_DESCRIPTION("Freescale MXS DCP Driver"); 1257 MODULE_LICENSE("GPL"); 1258 MODULE_ALIAS("platform:mxs-dcp"); 1259