/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#define _SBF(s, v)			((v) << (s))

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C
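/*
 * Naming of the feed control DMA channels above, inferred from the
 * register names (the SoC datasheet is authoritative): BRDMA = block
 * cipher RX (memory -> engine), BTDMA = block cipher TX (engine ->
 * memory), HRDMA = hash RX, PKDMA = PKA (public key accelerator) DMA.
 * Only BRDMA and BTDMA are used by this driver.
 */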
/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + ((s) << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 *
 * Specifies platform specific configuration of the SSS module.
 * Note: a dedicated structure for driver specific platform data is kept
 * to allow future expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int			aes_offset;
};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	uint8_t				aes_key[AES_MAX_KEY_SIZE];
	uint8_t				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct ablkcipher_request	*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	/* In case of unaligned access: */
	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;

	struct samsung_aes_variant	*variant;
};

static struct s5p_aes_dev *s5p_dev;

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
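/*
 * Pick the register layout for this SoC: prefer an OF match when the
 * device was instantiated from the device tree, otherwise fall back to
 * the legacy platform device ID table.
 */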
static inline struct samsung_aes_variant *find_s5p_sss_version
				   (struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (struct samsung_aes_variant *)match->data;
	}
	return (struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
	dev->req->base.complete(&dev->req->base, err);
	dev->busy = false;
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}
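/*
 * The engine is fed one scatterlist entry at a time: on each feed
 * control interrupt the helpers below unmap the entry that has just
 * completed and, if the list has not ended, map the next one so the
 * IRQ handler can program its address and length into the DMA
 * registers.
 */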
/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to the device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to the device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	bool tx_end = false;
	unsigned long flags;
	uint32_t status;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle an rx or tx interrupt. If there is still data (the
	 * scatterlist did not reach its end), then map the next
	 * scatterlist entry.
	 * In case of such a mapping error, s5p_aes_complete() is called.
	 *
	 * If there is no more data in the tx scatter list, call
	 * s5p_aes_complete() and schedule a new tasklet.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	SSS_WRITE(dev, FCINTPEND, status);

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev, 0);
		dev->busy = true;
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing the length of a DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	return IRQ_HANDLED;

error:
	s5p_sg_done(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);

	return IRQ_HANDLED;
}
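/*
 * Load the IV and the key into the AES register file. The key is
 * written right-aligned within the eight 32-bit KEY_DATA words: a
 * 256-bit key starts at word 0, a 192-bit key at word 2 and a 128-bit
 * key at word 4, so the key always ends at the last word.
 */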
static void s5p_set_aes(struct s5p_aes_dev *dev,
			uint8_t *key, uint8_t *iv, unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		aes_control |= SSS_AES_CHAIN_MODE_CTR;

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* Alternatively, byte swapping could be done on the DMA side. */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

	s5p_set_dma_indata(dev,  dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);
}
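/*
 * For illustration, a CBC decrypt request with a 256-bit key ends up
 * programming AES_CONTROL above as:
 *
 *	SSS_AES_KEY_CHANGE_MODE | SSS_AES_MODE_DECRYPT |
 *	SSS_AES_CHAIN_MODE_CBC | SSS_AES_KEY_SIZE_256 |
 *	SSS_AES_FIFO_MODE | <the five SSS_AES_BYTESWAP_* bits>
 */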
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}
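/*
 * Request flow in this driver: s5p_aes_crypt() queues the request and,
 * if the engine is idle, kicks the tasklet; s5p_tasklet_cb() dequeues
 * one request and calls s5p_aes_crypt_start(); the feed control
 * interrupt then walks the scatterlists and finally completes the
 * request and reschedules the tasklet for the next queued one.
 */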
static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};

static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);

	variant = find_s5p_sss_version(pdev);

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->variant = variant;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}
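/*
 * Teardown mirrors probe in reverse: unregister the algorithms first so
 * no new requests can arrive, then kill the tasklet and gate the clock.
 */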
static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");