/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#define _SBF(s, v)			((v) << (s))

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
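/*
 * The key size and chain mode fields below are multi-bit values built
 * with _SBF(shift, value); e.g. SSS_AES_KEY_SIZE_256 expands to
 * 0x02 << 4 == 0x20 in SSS_REG_AES_CONTROL.
 */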
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int			aes_offset;
};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	uint8_t				aes_key[AES_MAX_KEY_SIZE];
	uint8_t				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct ablkcipher_request	*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	/* In case of unaligned access: */
	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;

	struct samsung_aes_variant	*variant;
};

static struct s5p_aes_dev *s5p_dev;

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline struct samsung_aes_variant *find_s5p_sss_version
				   (struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (struct samsung_aes_variant *)match->data;
	}
	return (struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}
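/*
 * Each feed control DMA channel is programmed through a (start, length)
 * register pair; writing the length register starts the transfer
 * immediately (see the comment in s5p_aes_interrupt()).
 */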
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);

	/* holding a lock outside */
	dev->req->base.complete(&dev->req->base, err);
	dev->busy = false;
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}
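/*
 * The engine is fed one scatterlist entry at a time: each BRDMA/BTDMA
 * completion interrupt unmaps the finished entry and, if more entries
 * follow, the next one is programmed from interrupt context
 * (s5p_aes_interrupt() -> s5p_aes_rx()/s5p_aes_tx() below).
 */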
/*
 * Returns true if new transmitting (output) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_outdata()). False otherwise.
 */
static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int err = 0;
	bool ret = false;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (err)
			s5p_aes_complete(dev, err);
		else
			ret = true;
	} else {
		s5p_aes_complete(dev, err);

		dev->busy = true;
		tasklet_schedule(&dev->tasklet);
	}

	return ret;
}

/*
 * Returns true if new receiving (input) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_indata()). False otherwise.
 */
static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int err;
	bool ret = false;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		err = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (err)
			s5p_aes_complete(dev, err);
		else
			ret = true;
	}

	return ret;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	bool set_dma_tx = false;
	bool set_dma_rx = false;
	unsigned long flags;
	uint32_t status;

	spin_lock_irqsave(&dev->lock, flags);

	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		set_dma_rx = s5p_aes_rx(dev);
	if (status & SSS_FCINTSTAT_BTDMAINT)
		set_dma_tx = s5p_aes_tx(dev);

	SSS_WRITE(dev, FCINTPEND, status);

	/*
	 * Writing length of DMA block (either receiving or transmitting)
	 * will start the operation immediately, so this should be done
	 * at the end (even after clearing pending interrupts to not miss the
	 * interrupt).
	 */
	if (set_dma_tx)
		s5p_set_dma_outdata(dev, dev->sg_dst);
	if (set_dma_rx)
		s5p_set_dma_indata(dev, dev->sg_src);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}

static void s5p_set_aes(struct s5p_aes_dev *dev,
			uint8_t *key, uint8_t *iv, unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}
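/*
 * The destination path below mirrors s5p_set_indata_start(): whenever any
 * scatterlist entry is not a multiple of AES_BLOCK_SIZE, the data is
 * bounced through a contiguous buffer (s5p_make_sg_cpy()) and
 * s5p_aes_complete() copies the result back once the transfer finishes.
 */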
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		aes_control |= SSS_AES_CHAIN_MODE_CTR;

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_aes_complete(dev, err);
	spin_unlock_irqrestore(&dev->lock, flags);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}
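/*
 * Common entry point for the mode-specific handlers below: requests are
 * serialized through a crypto_queue of depth CRYPTO_QUEUE_LEN, guarded by
 * dev->lock and the dev->busy flag, and the tasklet dequeues and starts
 * them one at a time via s5p_aes_crypt_start().
 */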
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};
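/*
 * A minimal consumer sketch (hypothetical, not part of this driver):
 * once probe has registered the algorithms above, an in-kernel user
 * allocates the transform by its generic name and the crypto core picks
 * "cbc-aes-s5p" whenever its priority wins:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 */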
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);

	variant = find_s5p_sss_version(pdev);

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
			       IRQF_SHARED, pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->variant = variant;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");