/* atmel-sha.c -- excerpt of lines matching the "at91sam9g46-sha" search */
// SPDX-License-Identifier: GPL-2.0

 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Some ideas are from the omap-sham.c driver.

#include <linux/dma-mapping.h>

#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
/* SHA flags */

snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
(offset - SHA_REG_DIGEST(0)) >> 2);

u32 value = readl_relaxed(dd->io_base + offset);
if (dd->flags & SHA_FLAGS_DUMP_REG) {
	dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,

if (dd->flags & SHA_FLAGS_DUMP_REG) {
	dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
writel_relaxed(value, dd->io_base + offset);
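/*
 * A standalone userspace sketch of the accessor pattern above: every read
 * and write goes through one helper that can trace the access when a dump
 * flag is set (plain memory stands in for MMIO, fprintf() for dev_vdbg();
 * all names below are illustrative, not part of the driver).
 */
#include <stdint.h>
#include <stdio.h>

#define DUMP_REG 0x1			/* mirrors the role of SHA_FLAGS_DUMP_REG */

struct dev_sim {
	uint32_t flags;
	volatile uint32_t *io_base;	/* an ioremap()ed region in the real driver */
};

static uint32_t reg_read(struct dev_sim *dd, unsigned int offset)
{
	uint32_t value = dd->io_base[offset / 4];

	if (dd->flags & DUMP_REG)
		fprintf(stderr, "read 0x%08x from reg@0x%02x\n", value, offset);
	return value;
}

static void reg_write(struct dev_sim *dd, unsigned int offset, uint32_t value)
{
	if (dd->flags & DUMP_REG)
		fprintf(stderr, "write 0x%08x into reg@0x%02x\n", value, offset);
	dd->io_base[offset / 4] = value;
}

int main(void)
{
	uint32_t regs[16] = { 0 };
	struct dev_sim dd = { .flags = DUMP_REG, .io_base = regs };

	reg_write(&dd, 0x00, 0xdeadbeef);
	printf("0x%08x\n", reg_read(&dd, 0x00));
	return 0;
}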
struct ahash_request *req = dd->req;
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
clk_disable(dd->iclk);
if ((dd->is_async || dd->force_complete) && req->base.complete)
tasklet_schedule(&dd->queue_task);
while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
	count = min(ctx->sg->length - ctx->offset, ctx->total);
	count = min(count, ctx->buflen - ctx->bufcnt);
	if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
		ctx->sg = sg_next(ctx->sg);
	scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
				 ctx->offset, count, 0);
	ctx->bufcnt += count;
	ctx->offset += count;
	ctx->total -= count;
	if (ctx->offset == ctx->sg->length) {
		ctx->sg = sg_next(ctx->sg);
		if (ctx->sg)
			ctx->offset = 0;
			ctx->total = 0;
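/*
 * A self-contained sketch of the bookkeeping in atmel_sha_append_sg() above:
 * drain a list of data segments into a fixed block buffer until the buffer
 * is full or the input runs out, advancing segment/offset as bytes are
 * consumed. A plain array of segments stands in for the scatterlist; all
 * names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct seg { const unsigned char *data; size_t len; };

struct walk {
	struct seg *sg;		/* current segment, NULL once exhausted */
	size_t nsegs;		/* segments remaining, including current */
	size_t offset;		/* bytes consumed within current segment */
	size_t total;		/* bytes still requested by the caller */
};

/* Copy into buf[bufcnt..buflen); returns the new bufcnt. */
static size_t append_segs(struct walk *w, unsigned char *buf,
			  size_t bufcnt, size_t buflen)
{
	while (bufcnt < buflen && w->total && w->sg) {
		size_t count = w->sg->len - w->offset;

		if (count > w->total)
			count = w->total;
		if (count > buflen - bufcnt)
			count = buflen - bufcnt;

		memcpy(buf + bufcnt, w->sg->data + w->offset, count);
		bufcnt += count;
		w->offset += count;
		w->total -= count;

		if (w->offset == w->sg->len) {	/* segment consumed: advance */
			w->sg = (--w->nsegs) ? w->sg + 1 : NULL;
			w->offset = 0;
		}
	}
	return bufcnt;
}

int main(void)
{
	struct seg segs[] = { { (const unsigned char *)"abc", 3 },
			      { (const unsigned char *)"defgh", 5 } };
	struct walk w = { segs, 2, 0, 8 };
	unsigned char buf[8];
	size_t n = append_segs(&w, buf, 0, sizeof(buf));

	printf("%.*s (%zu bytes)\n", (int)n, (const char *)buf, n);
	return 0;
}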
349 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
354 * - if message length < 56 bytes then padlen = 56 - message length
355 * - else padlen = 64 + 56 - message length
358 * - if message length < 112 bytes then padlen = 112 - message length
359 * - else padlen = 128 + 112 - message length
367 size[0] = ctx->digcnt[0]; in atmel_sha_fill_padding()
368 size[1] = ctx->digcnt[1]; in atmel_sha_fill_padding()
370 size[0] += ctx->bufcnt; in atmel_sha_fill_padding()
371 if (size[0] < ctx->bufcnt) in atmel_sha_fill_padding()
381 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_fill_padding()
384 index = ctx->bufcnt & 0x7f; in atmel_sha_fill_padding()
385 padlen = (index < 112) ? (112 - index) : ((128+112) - index); in atmel_sha_fill_padding()
386 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
387 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
388 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in atmel_sha_fill_padding()
389 ctx->bufcnt += padlen + 16; in atmel_sha_fill_padding()
390 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
394 index = ctx->bufcnt & 0x3f; in atmel_sha_fill_padding()
395 padlen = (index < 56) ? (56 - index) : ((64+56) - index); in atmel_sha_fill_padding()
396 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
397 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
398 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in atmel_sha_fill_padding()
399 ctx->bufcnt += padlen + 8; in atmel_sha_fill_padding()
400 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
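/*
 * The padlen rules quoted in the comment above reduce to one expression per
 * block size: place 0x80, zero-fill, then append the big-endian bit length
 * in the last 8 (or 16) bytes of a block. A standalone sketch of just the
 * length computation, matching the index/padlen arithmetic in
 * atmel_sha_fill_padding() (userspace, illustrative names).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Bytes of 0x80-plus-zero padding inserted before the length field so that
 * buffered_len + padlen + length-field is a multiple of the block size.
 * blocksize is 64 (SHA1/224/256, 8-byte length) or 128 (SHA384/512, 16-byte).
 */
static size_t sha_padlen(size_t buffered_len, size_t blocksize)
{
	size_t lenfield = blocksize / 8;	/* 8 or 16 bytes */
	size_t boundary = blocksize - lenfield;	/* 56 or 112 */
	size_t index = buffered_len & (blocksize - 1);

	return (index < boundary) ? boundary - index
				  : blocksize + boundary - index;
}

int main(void)
{
	/* 55 buffered bytes leave room for the length in the same block ... */
	assert(sha_padlen(55, 64) == 1);
	/* ... 56 do not, so a whole extra block is needed. */
	assert(sha_padlen(56, 64) == 64);
	assert((55 + sha_padlen(55, 64) + 8) % 64 == 0);
	assert((56 + sha_padlen(56, 64) + 8) % 64 == 0);
	printf("padlen checks passed\n");
	return 0;
}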
if (!tctx->dd) {
tctx->dd = dd;
dd = tctx->dd;

ctx->dd = dd;
ctx->flags = 0;
dev_dbg(dd->dev, "init: digest size: %u\n",
ctx->flags |= SHA_FLAGS_SHA1;
ctx->block_size = SHA1_BLOCK_SIZE;
ctx->flags |= SHA_FLAGS_SHA224;
ctx->block_size = SHA224_BLOCK_SIZE;
ctx->flags |= SHA_FLAGS_SHA256;
ctx->block_size = SHA256_BLOCK_SIZE;
ctx->flags |= SHA_FLAGS_SHA384;
ctx->block_size = SHA384_BLOCK_SIZE;
ctx->flags |= SHA_FLAGS_SHA512;
ctx->block_size = SHA512_BLOCK_SIZE;
return -EINVAL;
ctx->bufcnt = 0;
ctx->digcnt[0] = 0;
ctx->digcnt[1] = 0;
ctx->buflen = SHA_BUFFER_LEN;

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
if (!dd->caps.has_dma)
if (dd->caps.has_dualbuff)
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
	const u32 *hash = (const u32 *)ctx->digest;
	ctx->flags &= ~SHA_FLAGS_RESTORE;

dd->resume = resume;
return -EINPROGRESS;

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
	ctx->digcnt[1], ctx->digcnt[0], length, final);
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt[0] += length;
if (ctx->digcnt[0] < length)
	ctx->digcnt[1]++;
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd->flags |= SHA_FLAGS_CPU;
return -EINPROGRESS;
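/*
 * ctx->digcnt[] above is a 128-bit byte counter kept as two u64 halves; the
 * "if (digcnt[0] < length) digcnt[1]++" test detects wraparound of the low
 * word after the add. A tiny standalone check of that carry idiom:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Add len to a 128-bit counter held as {lo, hi}. Unsigned overflow of lo is
 * well-defined in C, and lo < len after the add means it wrapped.
 */
static void digcnt_add(uint64_t cnt[2], uint64_t len)
{
	cnt[0] += len;
	if (cnt[0] < len)
		cnt[1]++;
}

int main(void)
{
	uint64_t cnt[2] = { UINT64_MAX - 3, 0 };

	digcnt_add(cnt, 8);			/* wraps the low word */
	assert(cnt[0] == 4 && cnt[1] == 1);
	printf("carry ok: hi=%llu lo=%llu\n",
	       (unsigned long long)cnt[1], (unsigned long long)cnt[0]);
	return 0;
}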
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
	ctx->digcnt[1], ctx->digcnt[0], length1, final);
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt[0] += length1;
if (ctx->digcnt[0] < length1)
	ctx->digcnt[1]++;
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd->flags |= SHA_FLAGS_DMA_ACTIVE;
return -EINPROGRESS;

dd->is_async = true;
/* dma_lch_in transfer completed: now wait for DATRDY */

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
	ctx->digcnt[1], ctx->digcnt[0], length1, final);
dd->dma_lch_in.dma_conf.src_maxburst = 16;
dd->dma_lch_in.dma_conf.dst_maxburst = 16;
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
return atmel_sha_complete(dd, -EINVAL);
in_desc->callback = atmel_sha_dma_callback;
in_desc->callback_param = dd;
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt[0] += length1;
if (ctx->digcnt[0] < length1)
	ctx->digcnt[1]++;
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd->flags |= SHA_FLAGS_DMA_ACTIVE;
dma_async_issue_pending(dd->dma_lch_in.chan);
return -EINPROGRESS;
if (dd->caps.has_dma)

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);

ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			       ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
	dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
		ctx->block_size);
	return atmel_sha_complete(dd, -EINVAL);
ctx->flags &= ~SHA_FLAGS_SG;
return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
	ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
if (final || (ctx->bufcnt == ctx->buflen)) {
	count = ctx->bufcnt;
	ctx->bufcnt = 0;

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
if (!ctx->total)
if (ctx->bufcnt || ctx->offset)
dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
	ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
sg = ctx->sg;
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
	/* size is not ctx->block_size aligned */
length = min(ctx->total, sg->length);
if (!(ctx->flags & SHA_FLAGS_FINUP)) {
	/* not last sg must be ctx->block_size aligned */
	tail = length & (ctx->block_size - 1);
	length -= tail;
ctx->total -= length;
ctx->offset = length; /* offset where to start slow */
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
tail = length & (ctx->block_size - 1);
length -= tail;
ctx->total += tail;
ctx->offset = length; /* offset where to start slow */
sg = ctx->sg;
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			       ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
	dev_err(dd->dev, "dma %zu bytes error\n",
		ctx->buflen + ctx->block_size);
	return atmel_sha_complete(dd, -EINVAL);
ctx->flags &= ~SHA_FLAGS_SG;
count = ctx->bufcnt;
ctx->bufcnt = 0;
return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
ctx->sg = sg;
if (!dma_map_sg(dd->dev, ctx->sg, 1,
	dev_err(dd->dev, "dma_map_sg error\n");
	return atmel_sha_complete(dd, -EINVAL);
ctx->flags |= SHA_FLAGS_SG;
count = ctx->bufcnt;
ctx->bufcnt = 0;
return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
			    length, ctx->dma_addr, count, final);
if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
	dev_err(dd->dev, "dma_map_sg error\n");
	return atmel_sha_complete(dd, -EINVAL);
ctx->flags |= SHA_FLAGS_SG;
return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,

struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
if (ctx->flags & SHA_FLAGS_SG) {
	dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
	if (ctx->sg->length == ctx->offset) {
		ctx->sg = sg_next(ctx->sg);
		if (ctx->sg)
			ctx->offset = 0;
	if (ctx->flags & SHA_FLAGS_PAD) {
		dma_unmap_single(dd->dev, ctx->dma_addr,
				 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
		 ctx->block_size, DMA_TO_DEVICE);

struct ahash_request *req = dd->req;
dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
	ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
if (ctx->flags & SHA_FLAGS_CPU)
dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
	err, ctx->digcnt[1], ctx->digcnt[0]);
struct ahash_request *req = dd->req;
if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
	count = ctx->bufcnt;
	ctx->bufcnt = 0;
count = ctx->bufcnt;
ctx->bufcnt = 0;
err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
dev_dbg(dd->dev, "final_req: err: %d\n", err);

u32 *hash = (u32 *)ctx->digest;
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
ctx->flags |= SHA_FLAGS_RESTORE;

if (!req->result)
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);

struct atmel_sha_dev *dd = ctx->dd;
if (ctx->digcnt[0] || ctx->digcnt[1])
dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
	ctx->digcnt[0], ctx->bufcnt);

struct atmel_sha_dev *dd = ctx->dd;
if (SHA_FLAGS_FINAL & dd->flags)
ctx->flags |= SHA_FLAGS_ERROR;

err = clk_enable(dd->iclk);
if (!(SHA_FLAGS_INIT & dd->flags)) {
	dd->flags |= SHA_FLAGS_INIT;

dd->hw_version = atmel_sha_get_version(dd);
dev_info(dd->dev,
	 "version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);

spin_lock_irqsave(&dd->lock, flags);
ret = ahash_enqueue_request(&dd->queue, req);
if (SHA_FLAGS_BUSY & dd->flags) {
	spin_unlock_irqrestore(&dd->lock, flags);
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
dd->flags |= SHA_FLAGS_BUSY;
spin_unlock_irqrestore(&dd->lock, flags);
crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(async_req->tfm);
dd->req = ahash_request_cast(async_req);
start_async = (dd->req != req);
dd->is_async = start_async;
dd->force_complete = false;
/* WARNING: ctx->start() MAY change dd->is_async. */
err = ctx->start(dd);
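/*
 * atmel_sha_handle_queue() above follows the usual crypto-queue shape: under
 * a spinlock, enqueue the new request, bail out if the engine is already
 * busy, otherwise dequeue one request and mark the device busy before
 * dropping the lock. A hedged userspace sketch of that locking shape, with a
 * pthread mutex standing in for the spinlock and integers for requests;
 * backlog notification is omitted and all names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QLEN 8

struct engine {
	pthread_mutex_t lock;
	int queue[QLEN];
	int head, tail;		/* simple ring buffer of pending "requests" */
	bool busy;
};

/*
 * Enqueue req; if the engine is idle, claim it (mirrors
 * dd->flags |= SHA_FLAGS_BUSY) and return the request to start now.
 * Returns -1 when the engine is busy and the request just stays queued.
 */
static int handle_queue(struct engine *e, int req)
{
	int start = -1;

	pthread_mutex_lock(&e->lock);
	e->queue[e->tail++ % QLEN] = req;
	if (!e->busy) {
		start = e->queue[e->head++ % QLEN];
		e->busy = true;
	}
	pthread_mutex_unlock(&e->lock);
	return start;
}

int main(void)
{
	struct engine e = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("start now: %d\n", handle_queue(&e, 1));	/* idle -> starts 1 */
	printf("start now: %d\n", handle_queue(&e, 2));	/* busy -> -1, queued */
	return 0;
}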
struct ahash_request *req = dd->req;
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
	ctx->op, req->nbytes);
 * -EINPROGRESS: the hardware is busy and the SHA driver will resume
 * 0: the SHA driver can continue its job then release the hardware
 * The SHA driver must stop its job without calling
dd->resume = atmel_sha_done;
if (ctx->op == SHA_OP_UPDATE) {
	if (!err && (ctx->flags & SHA_FLAGS_FINUP))
} else if (ctx->op == SHA_OP_FINAL) {
dev_dbg(dd->dev, "exit, err: %d\n", err);

struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct atmel_sha_dev *dd = tctx->dd;
ctx->op = op;

if (!req->nbytes)
ctx->total = req->nbytes;
ctx->sg = req->src;
ctx->offset = 0;
if (ctx->flags & SHA_FLAGS_FINUP) {
	if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
		ctx->flags |= SHA_FLAGS_CPU;
} else if (ctx->bufcnt + ctx->total < ctx->buflen) {

ctx->flags |= SHA_FLAGS_FINUP;
if (ctx->flags & SHA_FLAGS_ERROR)
if (ctx->flags & SHA_FLAGS_PAD)

ctx->flags |= SHA_FLAGS_FINUP;
if (err1 == -EINPROGRESS ||
    (err1 == -EBUSY && (ahash_request_flags(req) &

ctx->start = atmel_sha_start;

alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_cra_init;
alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
alg->init = atmel_sha_init;
alg->update = atmel_sha_update;
alg->final = atmel_sha_final;
alg->finup = atmel_sha_finup;
alg->digest = atmel_sha_digest;
alg->export = atmel_sha_export;
alg->import = atmel_sha_import;

.halg.base.cra_driver_name = "atmel-sha1",
.halg.base.cra_driver_name = "atmel-sha256",
.halg.base.cra_driver_name = "atmel-sha224",
.halg.base.cra_driver_name = "atmel-sha384",
.halg.base.cra_driver_name = "atmel-sha512",

if (SHA_FLAGS_CPU & dd->flags) {
	if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
		dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
} else if (SHA_FLAGS_DMA_READY & dd->flags) {
	if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
		dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
	if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
		/* hash or semi-hash ready */
		dd->flags &= ~(SHA_FLAGS_DMA_READY |
if (err != -EINPROGRESS)
atmel_sha_finish_req(dd->req, err);

dd->is_async = true;
(void)dd->resume(dd);

if (SHA_FLAGS_BUSY & sha_dd->flags) {
	sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
	if (!(SHA_FLAGS_CPU & sha_dd->flags))
		sha_dd->flags |= SHA_FLAGS_DMA_READY;
	tasklet_schedule(&sha_dd->done_task);
dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
struct atmel_sha_dma *dma = &dd->dma_lch_in;
struct ahash_request *req = dd->req;
size_t bs = ctx->block_size;
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
if (len <= sg->length) {
	dma->nents = nents + 1;
	dma->last_sg_length = sg->length;
	sg->length = ALIGN(len, sizeof(u32));
if (!IS_ALIGNED(sg->length, bs))
len -= sg->length;

struct atmel_sha_dma *dma = &dd->dma_lch_in;
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
for (nents = 0; nents < dma->nents - 1; ++nents)
sg->length = dma->last_sg_length;
dd->is_async = true;
(void)atmel_sha_wait_for_data_ready(dd, dd->resume);

struct atmel_sha_dma *dma = &dd->dma_lch_in;
struct dma_slave_config *config = &dma->dma_conf;
struct dma_chan *chan = dma->chan;
dd->resume = resume;
 * dma->nents has already been initialized by
dma->sg = src;
sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
err = -ENOMEM;
config->src_maxburst = 16;
config->dst_maxburst = 16;
desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
err = -ENOMEM;
desc->callback = atmel_sha_dma_callback2;
desc->callback_param = dd;
return -EINPROGRESS;
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);

struct ahash_request *req = dd->req;
const u32 *words = (const u32 *)ctx->buffer;
din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
ctx->offset += ctx->bufcnt;
ctx->total -= ctx->bufcnt;
if (!ctx->total)
 * Fill ctx->buffer now with the next data to be written into
 * IDATARx: it gives time for the SHA hardware to process
ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
			 ctx->offset, ctx->bufcnt, 0);
dd->resume = atmel_sha_cpu_transfer;
return -EINPROGRESS;
if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
	return dd->cpu_transfer_complete(dd);
return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);

struct ahash_request *req = dd->req;
ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
ctx->flags |= SHA_FLAGS_IDATAR0;
ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
ctx->sg = sg;
ctx->total = len;
ctx->offset = 0;
ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
			 ctx->offset, ctx->bufcnt, 0);
dd->cpu_transfer_complete = resume;

struct ahash_request *req = dd->req;
if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
	return atmel_sha_complete(dd, -EINVAL);
mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
sg_init_one(&dd->tmp, data, datalen);
return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);

kfree(hkey->keydup);

if (keylen > sizeof(hkey->buffer)) {
	hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
	if (!hkey->keydup)
		return -ENOMEM;
memcpy(hkey->buffer, key, keylen);
hkey->valid = true;
hkey->keylen = keylen;

if (!hkey->valid)
*keylen = hkey->keylen;
*key = (hkey->keydup) ? hkey->keydup : hkey->buffer;

struct ahash_request *req = dd->req;
hmac->resume = resume;
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
ctx->block_size = SHA1_BLOCK_SIZE;
ctx->hash_size = SHA1_DIGEST_SIZE;
ctx->block_size = SHA224_BLOCK_SIZE;
ctx->hash_size = SHA256_DIGEST_SIZE;
ctx->block_size = SHA256_BLOCK_SIZE;
ctx->hash_size = SHA256_DIGEST_SIZE;
ctx->block_size = SHA384_BLOCK_SIZE;
ctx->hash_size = SHA512_DIGEST_SIZE;
ctx->block_size = SHA512_BLOCK_SIZE;
ctx->hash_size = SHA512_DIGEST_SIZE;
return atmel_sha_complete(dd, -EINVAL);
bs = ctx->block_size;
if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
memcpy((u8 *)hmac->ipad, key, keylen);
memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);

struct ahash_request *req = dd->req;
size_t bs = ctx->block_size;
hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
memset((u8 *)hmac->ipad + ds, 0, bs - ds);

struct ahash_request *req = dd->req;
size_t bs = ctx->block_size;
unsafe_memcpy(hmac->opad, hmac->ipad, bs,
	      "fortified memcpy causes -Wrestrict warning");
hmac->ipad[i] ^= 0x36363636;
hmac->opad[i] ^= 0x5c5c5c5c;
return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
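/*
 * The code above prepares HMAC's two derived blocks: the (possibly
 * pre-hashed) key is zero-padded to one block, then XORed with repeated
 * 0x36 for ipad and 0x5c for opad, as in RFC 2104; the driver does the XOR
 * a u32 at a time. A standalone sketch of that key schedule, byte-at-a-time
 * for clarity; the hashing of over-long keys is left out and all names are
 * illustrative.
 */
#include <stdio.h>
#include <string.h>

#define BS 64	/* SHA1/SHA224/SHA256 block size */

/*
 * Derive the RFC 2104 inner/outer key blocks from a key that is already no
 * longer than one block (the driver first hashes longer keys down to the
 * digest size).
 */
static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char ipad[BS], unsigned char opad[BS])
{
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, BS - keylen);	/* K' = key zero-padded */
	memcpy(opad, ipad, BS);

	for (size_t i = 0; i < BS; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	/*
	 * HMAC(K, m) = H(opad || H(ipad || m)); the driver computes the two
	 * partial hashes of ipad/opad once and reloads them as IVs later.
	 */
}

int main(void)
{
	unsigned char ipad[BS], opad[BS];

	hmac_pads((const unsigned char *)"key", 3, ipad, opad);
	printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}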
struct ahash_request *req = dd->req;
size_t bs = ctx->block_size;
size_t hs = ctx->hash_size;
hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,

struct ahash_request *req = dd->req;
size_t hs = ctx->hash_size;
hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
atmel_sha_hmac_key_release(&hmac->hkey);
return hmac->resume(dd);

struct ahash_request *req = dd->req;
switch (ctx->op) {
dd->resume = atmel_sha_done;
dd->resume = atmel_sha_hmac_final;
return atmel_sha_complete(dd, -EINVAL);

return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);

struct ahash_request *req = dd->req;
size_t bs = ctx->block_size;
size_t hs = ctx->hash_size;
ctx->bufcnt = 0;
ctx->digcnt[0] = bs;
ctx->digcnt[1] = 0;
ctx->flags |= SHA_FLAGS_RESTORE;
memcpy(ctx->digest, hmac->ipad, hs);

struct ahash_request *req = dd->req;
u32 *digest = (u32 *)ctx->digest;
size_t bs = ctx->block_size;
size_t hs = ctx->hash_size;
/* Save d = SHA((K' + ipad) | msg). */
/* Restore context to finish computing SHA((K' + opad) | d). */
atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
sg_init_one(&dd->tmp, digest, ds);
return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,

 * req->result might not be sizeof(u32) aligned, so copy the
 * digest into ctx->digest[] before memcpy() the data into
 * req->result.
atmel_sha_copy_hash(dd->req);
atmel_sha_copy_ready_hash(dd->req);
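/*
 * The comment above is about alignment: req->result is a caller-supplied
 * byte pointer, so the driver first stores the digest into the u32-aligned
 * ctx->digest[] and only then memcpy()s it out. A small standalone sketch
 * of why the intermediate copy is the safe move: memcpy() has no alignment
 * requirement, while dereferencing a misaligned u32 pointer is undefined
 * behavior (names here are illustrative).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t digest[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			       0x10325476, 0xc3d2e1f0 };	/* aligned staging copy */
	unsigned char out[sizeof(digest) + 1];
	unsigned char *result = out + 1;	/* deliberately misaligned target */

	/*
	 * Writing through (uint32_t *)result would be undefined behavior on
	 * strict-alignment CPUs; memcpy() byte-copies and is always safe.
	 */
	memcpy(result, digest, sizeof(digest));
	printf("first result byte: 0x%02x\n", result[0]);
	return 0;
}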
struct ahash_request *req = dd->req;
size_t hs = ctx->hash_size;
if (!req->nbytes) {
	req->nbytes = 0;
	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt);
if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
atmel_sha_write(dd, SHA_MSR, req->nbytes);
atmel_sha_write(dd, SHA_BCR, req->nbytes);
if (!req->nbytes) {
	sgbuf = &dd->tmp;
	req->nbytes = ctx->bufcnt;
	sgbuf = req->src;
return atmel_sha_dma_start(dd, sgbuf, req->nbytes,
return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true,

hmac->base.start = atmel_sha_hmac_start;
atmel_sha_hmac_key_init(&hmac->hkey);

atmel_sha_hmac_key_release(&hmac->hkey);

alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
alg->init = atmel_sha_hmac_init;
alg->update = atmel_sha_update;
alg->final = atmel_sha_final;
alg->digest = atmel_sha_hmac_digest;
alg->setkey = atmel_sha_hmac_setkey;
alg->export = atmel_sha_export;
alg->import = atmel_sha_import;

.halg.base.cra_driver_name = "atmel-hmac-sha1",
.halg.base.cra_driver_name = "atmel-hmac-sha224",
.halg.base.cra_driver_name = "atmel-hmac-sha256",
.halg.base.cra_driver_name = "atmel-hmac-sha384",
.halg.base.cra_driver_name = "atmel-hmac-sha512",

authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);

struct ahash_request *req = dd->req;
 * Force atmel_sha_complete() to call req->base.complete(), i.e.
 * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
dd->force_complete = true;
return authctx->cb(authctx->aes_dev, err, dd->is_async);

int err = -EINVAL;
name = "atmel-hmac-sha1";
name = "atmel-hmac-sha224";
name = "atmel-hmac-sha256";
name = "atmel-hmac-sha384";
name = "atmel-hmac-sha512";
tctx->start = atmel_sha_authenc_start;
tctx->flags = mode;
err = -ENOMEM;
auth->tfm = tfm;

crypto_free_ahash(auth->tfm);

struct crypto_ahash *tfm = auth->tfm;

struct atmel_sha_reqctx *ctx = &authctx->base;
struct crypto_ahash *tfm = auth->tfm;
/* Get SHA device. */
return cb(aes_dev, -ENODEV, false);
ctx->dd = dd;
ctx->buflen = SHA_BUFFER_LEN;
authctx->cb = cb;
authctx->aes_dev = aes_dev;

struct atmel_sha_reqctx *ctx = &authctx->base;
struct atmel_sha_dev *dd = ctx->dd;
return atmel_sha_complete(dd, -EINVAL);
authctx->cb = cb;
authctx->aes_dev = aes_dev;
authctx->assoc = assoc;
authctx->assoclen = assoclen;
authctx->textlen = textlen;
ctx->flags = hmac->base.flags;

struct ahash_request *req = dd->req;
struct atmel_sha_reqctx *ctx = &authctx->base;
size_t hs = ctx->hash_size;
atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
msg_size = authctx->assoclen + authctx->textlen;
return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,

struct ahash_request *req = dd->req;
return authctx->cb(authctx->aes_dev, 0, dd->is_async);

struct atmel_sha_reqctx *ctx = &authctx->base;
struct atmel_sha_dev *dd = ctx->dd;
switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
authctx->digestlen = SHA1_DIGEST_SIZE;
authctx->digestlen = SHA224_DIGEST_SIZE;
authctx->digestlen = SHA256_DIGEST_SIZE;
authctx->digestlen = SHA384_DIGEST_SIZE;
authctx->digestlen = SHA512_DIGEST_SIZE;
return atmel_sha_complete(dd, -EINVAL);
if (authctx->digestlen > digestlen)
	authctx->digestlen = digestlen;
authctx->cb = cb;
authctx->aes_dev = aes_dev;
authctx->digest = digest;

struct ahash_request *req = dd->req;
size_t i, num_words = authctx->digestlen / sizeof(u32);
authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));

struct atmel_sha_reqctx *ctx = &authctx->base;
struct atmel_sha_dev *dd = ctx->dd;
/* Prevent atmel_sha_complete() from calling req->base.complete(). */
dd->is_async = false;
dd->force_complete = false;

if (dd->caps.has_hmac)
if (dd->caps.has_sha224)
if (dd->caps.has_sha_384_512) {

if (dd->caps.has_sha224) {
if (dd->caps.has_sha_384_512) {
if (dd->caps.has_hmac) {

dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_lch_in.chan)) {
	return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan),
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
dd->dma_lch_in.dma_conf.src_maxburst = 1;
dd->dma_lch_in.dma_conf.src_addr_width =
dd->dma_lch_in.dma_conf.dst_maxburst = 1;
dd->dma_lch_in.dma_conf.dst_addr_width =
dd->dma_lch_in.dma_conf.device_fc = false;

dma_release_channel(dd->dma_lch_in.chan);

dd->caps.has_dma = 0;
dd->caps.has_dualbuff = 0;
dd->caps.has_sha224 = 0;
dd->caps.has_sha_384_512 = 0;
dd->caps.has_uihv = 0;
dd->caps.has_hmac = 0;
switch (dd->hw_version & 0xff0) {
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
dd->caps.has_uihv = 1;
dd->caps.has_hmac = 1;
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
dd->caps.has_uihv = 1;
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dev_warn(dd->dev,
	 "Unmanaged sha version, set minimum capabilities\n");
{ .compatible = "atmel,at91sam9g46-sha" },

struct device *dev = &pdev->dev;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
return -ENOMEM;
sha_dd->dev = dev;
INIT_LIST_HEAD(&sha_dd->list);
spin_lock_init(&sha_dd->lock);
tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res);
if (IS_ERR(sha_dd->io_base)) {
	err = PTR_ERR(sha_dd->io_base);
sha_dd->phys_base = sha_res->start;
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
	err = sha_dd->irq;
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
		       IRQF_SHARED, "atmel-sha", sha_dd);
dev_err(dev, "unable to request sha irq.\n");
sha_dd->iclk = devm_clk_get_prepared(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
	err = PTR_ERR(sha_dd->iclk);
if (sha_dd->caps.has_dma) {
	dma_chan_name(sha_dd->dma_lch_in.chan));
list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
sha_dd->caps.has_sha224 ? "/SHA224" : "",
sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
list_del(&sha_dd->list);
if (sha_dd->caps.has_dma)
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);

list_del(&sha_dd->list);
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
if (sha_dd->caps.has_dma)

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");