// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
#include <soc/fsl/dcp.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behavior on i.MX6SL and ULL;
 * these are flipped for consistency with hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex		mutex;
	uint32_t		alg;
	unsigned int		hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher	*fallback;
	unsigned int		key_len;
	uint8_t			key[AES_KEYSIZE_128];
	bool			key_referenced;
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can only ever be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_OTP_KEY		(1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT	8

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	bool key_referenced = actx->key_referenced;
	int ret;

	if (key_referenced)
		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
					  AES_KEYSIZE_128, DMA_TO_DEVICE);
	else
		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	if (key_referenced)
		/* Set OTP key bit to select the key via KEY_SELECT. */
		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
	else
		/* Payload contains the key. */
		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	if (key_referenced)
		desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	if (key_referenced)
		dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
				 DMA_TO_DEVICE);
	return ret;
}

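/*
 * The DCP works on contiguous coherent buffers rather than on the request
 * scatterlist directly: data is staged into aes_in_buf in chunks of at most
 * DCP_BUF_SZ, the engine is run per chunk, and the result is copied back
 * from aes_out_buf into the destination scatterlist. For CBC, the IV used
 * for request chaining is recovered from the last processed block.
 */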
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

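/*
 * Key handling: a 128-bit AES key is kept in the per-tfm context and copied
 * into the coherent block for each request; any other key length is passed
 * to the software fallback. "paes" keys carry no key material at all - they
 * reference one of the hardware key slots (or the OTP/UNIQUE key) by number.
 */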
static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	actx->key_referenced = false;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	if (len != DCP_PAES_KEYSIZE)
		return -EINVAL;

	switch (key[0]) {
	case DCP_PAES_KEY_SLOT0:
	case DCP_PAES_KEY_SLOT1:
	case DCP_PAES_KEY_SLOT2:
	case DCP_PAES_KEY_SLOT3:
	case DCP_PAES_KEY_UNIQUE:
	case DCP_PAES_KEY_OTP:
		memcpy(actx->key, key, len);
		actx->key_len = len;
		actx->key_referenced = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));

	return 0;
}

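/*
 * The hash path mirrors the AES path: data is staged into sha_in_buf and
 * hashed DCP_BUF_SZ bytes at a time, with HASH_INIT set on the first block
 * and HASH_TERM (plus the digest payload pointer) on the last. Zero-length
 * hashes are short-circuited with the precomputed null digests above, and
 * the digest written by the hardware is byte-reversed, so the driver flips
 * it before handing back the result.
 */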
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

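/*
 * Common helper behind update/final/finup: the first submission of a
 * session (actx->hot == 0) sets rctx->init so the engine starts a fresh
 * hash, fini marks the request that terminates it, and the request is
 * then queued to the SHA channel thread.
 */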
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

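/*
 * Algorithm registration is gated on MXS_DCP_CAPABILITY1 in probe(). The
 * cra_alignmask of 15 advertises a 16-byte data alignment requirement to
 * the crypto API, and the "paes" variants are CRYPTO_ALG_INTERNAL, i.e.
 * reserved for in-kernel users that explicitly request them and never
 * exposed through generic algorithm lookups.
 */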
/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "ecb(paes)",
		.base.cra_driver_name	= "ecb-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_paes_init_tfm,
	}, {
		.base.cra_name		= "cbc(paes)",
		.base.cra_driver_name	= "cbc-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_paes_init_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

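/*
 * Both DCP interrupt lines (the VMI IRQ and the regular IRQ, see probe())
 * are wired to this handler. It acknowledges the per-channel bits in
 * DCP_STAT and completes the matching channel completion so that
 * mxs_dcp_start_dma() can return.
 */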
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

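/*
 * Probe order: map the registers, claim both IRQs, carve a 64-byte aligned
 * bounce/descriptor block out of a devm allocation, enable the (optional)
 * clock, reset and configure the DCP, start the per-channel kthreads and
 * finally register whatever algorithms CAPABILITY1 advertises.
 */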
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static void mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");