/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>

#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17
#define FLAGS_SHA1		18
#define FLAGS_HMAC		19
#define FLAGS_ERROR		20

#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN		PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

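/*
 * Per-device state. One instance is allocated in probe() for each
 * accelerator instance and linked into the global sham.dev_list.
 */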
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			err;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is almost unused. So copy sha1 size to reduce code */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						 SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i;

	if (!hash)
		return;

	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = be32_to_cpu(in[i]);
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = le32_to_cpu(in[i]);
	}
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	clk_enable(dd->iclk);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}

static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & BIT(FLAGS_SHA1))
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				      const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
				  struct omap_sham_reqctx *ctx,
				  size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
}

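/*
 * Slow DMA path: data is gathered from the scatterlist into the
 * request's internal buffer and DMA'd from there; the fast path in
 * omap_sham_update_dma_start() maps the scatterlist directly when its
 * start address and length are suitably aligned.
 */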
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= BIT(FLAGS_SHA1);

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

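/*
 * Finish HMAC in software: the hardware produced H(ipad || message)
 * (ipad was prepended to the data in omap_sham_init()), so only the
 * outer hash over opad and the inner digest is computed here with the
 * synchronous shash.
 */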
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if (ctx->flags & BIT(FLAGS_HMAC))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9
			 * will switch to bypass in final()
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			 * faster to use CPU for short transfers
			 */
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

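/*
 * HMAC key setup is done in software: keys longer than the block size
 * are first hashed down to the digest size, then ipad/opad are derived
 * by XORing the padded key with 0x36/0x5c.
 */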
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

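/*
 * Algorithms registered with the crypto API: SHA1, MD5 and their HMAC
 * variants, all asynchronous and marked CRYPTO_ALG_NEED_FALLBACK.
 */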
static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

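/*
 * DMA completion callback: any status other than a clean block-complete
 * interrupt is treated as an error and forces re-initialization of the
 * hardware on the next request.
 */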
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
	}

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}

static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	static struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		(omap_type() != OMAP2_DEVICE_TYPE_SEC &&
			omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");