caamalg_qi.c (30aa69e7bd9f7af3574120249eecb3726dcaf737) -> caamalg_qi.c (618b5dc48365cecc03daffa46800f20ab11e3f80)
1// SPDX-License-Identifier: GPL-2.0+
1/*
2 * Freescale FSL CAAM support for crypto API over QI backend.
3 * Based on caamalg.c
4 *
5 * Copyright 2013-2016 Freescale Semiconductor, Inc.
2/*
3 * Freescale FSL CAAM support for crypto API over QI backend.
4 * Based on caamalg.c
5 *
6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP
7 * Copyright 2016-2018 NXP
7 */
8
9#include "compat.h"
10#include "ctrl.h"
11#include "regs.h"
12#include "intern.h"
13#include "desc_constr.h"
14#include "error.h"

--- 23 unchanged lines hidden ---

38};
39
40struct caam_aead_alg {
41 struct aead_alg aead;
42 struct caam_alg_entry caam;
43 bool registered;
44};
45
8 */
9
10#include "compat.h"
11#include "ctrl.h"
12#include "regs.h"
13#include "intern.h"
14#include "desc_constr.h"
15#include "error.h"

--- 23 unchanged lines hidden ---

39};
40
41struct caam_aead_alg {
42 struct aead_alg aead;
43 struct caam_alg_entry caam;
44 bool registered;
45};
46
47struct caam_skcipher_alg {
48 struct skcipher_alg skcipher;
49 struct caam_alg_entry caam;
50 bool registered;
51};
52
46/*
47 * per-session context
48 */
49struct caam_ctx {
50 struct device *jrdev;
51 u32 sh_desc_enc[DESC_MAX_USED_LEN];
52 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53/*
54 * per-session context
55 */
56struct caam_ctx {
57 struct device *jrdev;
58 u32 sh_desc_enc[DESC_MAX_USED_LEN];
59 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
54 u8 key[CAAM_MAX_KEY_SIZE];
55 dma_addr_t key_dma;
56 enum dma_data_direction dir;
57 struct alginfo adata;
58 struct alginfo cdata;
59 unsigned int authsize;
60 struct device *qidev;
61 spinlock_t lock; /* Protects multiple init of driver context */

--- 522 unchanged lines hidden ---

584 dev_err(jrdev, "driver dec context update failed\n");
585 return ret;
586 }
587 }
588
589 return 0;
590}
591
60 u8 key[CAAM_MAX_KEY_SIZE];
61 dma_addr_t key_dma;
62 enum dma_data_direction dir;
63 struct alginfo adata;
64 struct alginfo cdata;
65 unsigned int authsize;
66 struct device *qidev;
67 spinlock_t lock; /* Protects multiple init of driver context */

--- 522 unchanged lines hidden ---

590 dev_err(jrdev, "driver dec context update failed\n");
591 return ret;
592 }
593 }
594
595 return 0;
596}
597
592static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
593 const u8 *key, unsigned int keylen)
598static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
599 unsigned int keylen)
594{
600{
595 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
596 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
597 const char *alg_name = crypto_tfm_alg_name(tfm);
601 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
602 struct caam_skcipher_alg *alg =
603 container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
604 skcipher);
598 struct device *jrdev = ctx->jrdev;
605 struct device *jrdev = ctx->jrdev;
599 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
606 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
600 u32 ctx1_iv_off = 0;
601 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
602 OP_ALG_AAI_CTR_MOD128);
607 u32 ctx1_iv_off = 0;
608 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
609 OP_ALG_AAI_CTR_MOD128);
603 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
610 const bool is_rfc3686 = alg->caam.rfc3686;
604 int ret = 0;
605
606#ifdef DEBUG
607 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
608 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
609#endif
610 /*
611 * AES-CTR needs to load IV in CONTEXT1 reg

--- 12 unchanged lines hidden ---

624 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
625 keylen -= CTR_RFC3686_NONCE_SIZE;
626 }
627
628 ctx->cdata.keylen = keylen;
629 ctx->cdata.key_virt = key;
630 ctx->cdata.key_inline = true;
631
611 int ret = 0;
612
613#ifdef DEBUG
614 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
615 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
616#endif
617 /*
618 * AES-CTR needs to load IV in CONTEXT1 reg

--- 12 unchanged lines hidden ---

631 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
632 keylen -= CTR_RFC3686_NONCE_SIZE;
633 }
634
635 ctx->cdata.keylen = keylen;
636 ctx->cdata.key_virt = key;
637 ctx->cdata.key_inline = true;
638
632 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
633 cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
634 is_rfc3686, ctx1_iv_off);
635 cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
636 is_rfc3686, ctx1_iv_off);
637 cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
638 ivsize, is_rfc3686, ctx1_iv_off);
639 /* skcipher encrypt, decrypt shared descriptors */
640 cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
641 is_rfc3686, ctx1_iv_off);
642 cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
643 is_rfc3686, ctx1_iv_off);
639
640 /* Now update the driver contexts with the new shared descriptor */
641 if (ctx->drv_ctx[ENCRYPT]) {
642 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
643 ctx->sh_desc_enc);
644 if (ret) {
645 dev_err(jrdev, "driver enc context update failed\n");
646 goto badkey;

--- 4 unchanged lines hidden ---

651 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
652 ctx->sh_desc_dec);
653 if (ret) {
654 dev_err(jrdev, "driver dec context update failed\n");
655 goto badkey;
656 }
657 }
658
644
645 /* Now update the driver contexts with the new shared descriptor */
646 if (ctx->drv_ctx[ENCRYPT]) {
647 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
648 ctx->sh_desc_enc);
649 if (ret) {
650 dev_err(jrdev, "driver enc context update failed\n");
651 goto badkey;

--- 4 unchanged lines hidden ---

656 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
657 ctx->sh_desc_dec);
658 if (ret) {
659 dev_err(jrdev, "driver dec context update failed\n");
660 goto badkey;
661 }
662 }
663
659 if (ctx->drv_ctx[GIVENCRYPT]) {
660 ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
661 ctx->sh_desc_givenc);
662 if (ret) {
663 dev_err(jrdev, "driver givenc context update failed\n");
664 goto badkey;
665 }
666 }
667
668 return ret;
669badkey:
664 return ret;
665badkey:
670 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
666 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
671 return -EINVAL;
672}
673
667 return -EINVAL;
668}
669
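
For orientation, the hunk above replaces the ablkcipher setkey with its skcipher equivalent. Below is a minimal, hypothetical sketch (not part of either revision) showing how a kernel caller reaches the converted entry points through the generic skcipher API; the function name and error handling are illustrative, and "cbc(aes)" may resolve to a different implementation depending on algorithm priorities.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* Hypothetical caller -- illustrative only, not part of this driver. */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   struct scatterlist *sg, unsigned int len,
				   u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* dispatches to skcipher_setkey() when this driver backs the tfm */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	/* dispatches to skcipher_encrypt() -> skcipher_crypt(req, true) */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
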
674static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
675 const u8 *key, unsigned int keylen)
670static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
671 unsigned int keylen)
676{
672{
677 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
673 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
678 struct device *jrdev = ctx->jrdev;
679 int ret = 0;
680
681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
682 dev_err(jrdev, "key size mismatch\n");
683 goto badkey;
684 }
685
686 ctx->cdata.keylen = keylen;
687 ctx->cdata.key_virt = key;
688 ctx->cdata.key_inline = true;
689
674 struct device *jrdev = ctx->jrdev;
675 int ret = 0;
676
677 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
678 dev_err(jrdev, "key size mismatch\n");
679 goto badkey;
680 }
681
682 ctx->cdata.keylen = keylen;
683 ctx->cdata.key_virt = key;
684 ctx->cdata.key_inline = true;
685
690 /* xts ablkcipher encrypt, decrypt shared descriptors */
691 cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
692 cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
686 /* xts skcipher encrypt, decrypt shared descriptors */
687 cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
688 cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
693
694 /* Now update the driver contexts with the new shared descriptor */
695 if (ctx->drv_ctx[ENCRYPT]) {
696 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
697 ctx->sh_desc_enc);
698 if (ret) {
699 dev_err(jrdev, "driver enc context update failed\n");
700 goto badkey;

--- 6 unchanged lines hidden ---

707 if (ret) {
708 dev_err(jrdev, "driver dec context update failed\n");
709 goto badkey;
710 }
711 }
712
713 return ret;
714badkey:
689
690 /* Now update the driver contexts with the new shared descriptor */
691 if (ctx->drv_ctx[ENCRYPT]) {
692 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
693 ctx->sh_desc_enc);
694 if (ret) {
695 dev_err(jrdev, "driver enc context update failed\n");
696 goto badkey;

--- 6 unchanged lines hidden ---

703 if (ret) {
704 dev_err(jrdev, "driver dec context update failed\n");
705 goto badkey;
706 }
707 }
708
709 return ret;
710badkey:
715 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
711 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
716 return -EINVAL;
717}
718
719/*
720 * aead_edesc - s/w-extended aead descriptor
721 * @src_nents: number of segments in input scatterlist
722 * @dst_nents: number of segments in output scatterlist
723 * @iv_dma: dma address of iv for checking continuity and link table

--- 12 unchanged lines hidden ---

736 dma_addr_t qm_sg_dma;
737 unsigned int assoclen;
738 dma_addr_t assoclen_dma;
739 struct caam_drv_req drv_req;
740 struct qm_sg_entry sgt[0];
741};
742
743/*
712 return -EINVAL;
713}
714
715/*
716 * aead_edesc - s/w-extended aead descriptor
717 * @src_nents: number of segments in input scatterlist
718 * @dst_nents: number of segments in output scatterlist
719 * @iv_dma: dma address of iv for checking continuity and link table

--- 12 unchanged lines hidden ---

732 dma_addr_t qm_sg_dma;
733 unsigned int assoclen;
734 dma_addr_t assoclen_dma;
735 struct caam_drv_req drv_req;
736 struct qm_sg_entry sgt[0];
737};
738
739/*
744 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
740 * skcipher_edesc - s/w-extended skcipher descriptor
745 * @src_nents: number of segments in input scatterlist
746 * @dst_nents: number of segments in output scatterlist
747 * @iv_dma: dma address of iv for checking continuity and link table
748 * @qm_sg_bytes: length of dma mapped h/w link table
749 * @qm_sg_dma: bus physical mapped address of h/w link table
750 * @drv_req: driver-specific request structure
751 * @sgt: the h/w link table, followed by IV
752 */
741 * @src_nents: number of segments in input scatterlist
742 * @dst_nents: number of segments in output scatterlist
743 * @iv_dma: dma address of iv for checking continuity and link table
744 * @qm_sg_bytes: length of dma mapped h/w link table
745 * @qm_sg_dma: bus physical mapped address of h/w link table
746 * @drv_req: driver-specific request structure
747 * @sgt: the h/w link table, followed by IV
748 */
753struct ablkcipher_edesc {
749struct skcipher_edesc {
754 int src_nents;
755 int dst_nents;
756 dma_addr_t iv_dma;
757 int qm_sg_bytes;
758 dma_addr_t qm_sg_dma;
759 struct caam_drv_req drv_req;
760 struct qm_sg_entry sgt[0];
761};

--- 14 unchanged lines hidden ---

776
777 /* Read again to check if some other core init drv_ctx */
778 drv_ctx = ctx->drv_ctx[type];
779 if (!drv_ctx) {
780 int cpu;
781
782 if (type == ENCRYPT)
783 desc = ctx->sh_desc_enc;
750 int src_nents;
751 int dst_nents;
752 dma_addr_t iv_dma;
753 int qm_sg_bytes;
754 dma_addr_t qm_sg_dma;
755 struct caam_drv_req drv_req;
756 struct qm_sg_entry sgt[0];
757};
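
The renamed skcipher_edesc keeps the h/w link table in the trailing sgt[] array and places the IV right after it, so a single qi_cache allocation must cover the descriptor, the link table and the IV. The small helper below is an assumption for exposition only (not code from either revision); it restates the bound that skcipher_edesc_alloc() checks further down.

/* Illustrative only: mirrors the size check done in skcipher_edesc_alloc(). */
static inline bool example_skcipher_edesc_fits(int qm_sg_ents, int ivsize)
{
	return offsetof(struct skcipher_edesc, sgt) +
	       qm_sg_ents * sizeof(struct qm_sg_entry) +
	       ivsize <= CAAM_QI_MEMCACHE_SIZE;
}
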

--- 14 unchanged lines hidden ---

772
773 /* Read again to check if some other core init drv_ctx */
774 drv_ctx = ctx->drv_ctx[type];
775 if (!drv_ctx) {
776 int cpu;
777
778 if (type == ENCRYPT)
779 desc = ctx->sh_desc_enc;
784 else if (type == DECRYPT)
780 else /* (type == DECRYPT) */
785 desc = ctx->sh_desc_dec;
781 desc = ctx->sh_desc_dec;
786 else /* (type == GIVENCRYPT) */
787 desc = ctx->sh_desc_givenc;
788
789 cpu = smp_processor_id();
790 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
791 if (likely(!IS_ERR_OR_NULL(drv_ctx)))
792 drv_ctx->op_type = type;
793
794 ctx->drv_ctx[type] = drv_ctx;
795 }
796
797 spin_unlock(&ctx->lock);
798 }
799
800 return drv_ctx;
801}
802
803static void caam_unmap(struct device *dev, struct scatterlist *src,
804 struct scatterlist *dst, int src_nents,
805 int dst_nents, dma_addr_t iv_dma, int ivsize,
782
783 cpu = smp_processor_id();
784 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
785 if (likely(!IS_ERR_OR_NULL(drv_ctx)))
786 drv_ctx->op_type = type;
787
788 ctx->drv_ctx[type] = drv_ctx;
789 }
790
791 spin_unlock(&ctx->lock);
792 }
793
794 return drv_ctx;
795}
796
797static void caam_unmap(struct device *dev, struct scatterlist *src,
798 struct scatterlist *dst, int src_nents,
799 int dst_nents, dma_addr_t iv_dma, int ivsize,
806 enum optype op_type, dma_addr_t qm_sg_dma,
807 int qm_sg_bytes)
800 dma_addr_t qm_sg_dma, int qm_sg_bytes)
808{
809 if (dst != src) {
810 if (src_nents)
811 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
812 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
813 } else {
814 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
815 }
816
817 if (iv_dma)
801{
802 if (dst != src) {
803 if (src_nents)
804 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
805 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
806 } else {
807 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
808 }
809
810 if (iv_dma)
818 dma_unmap_single(dev, iv_dma, ivsize,
819 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
820 DMA_TO_DEVICE);
811 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
821 if (qm_sg_bytes)
822 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
823}
824
825static void aead_unmap(struct device *dev,
826 struct aead_edesc *edesc,
827 struct aead_request *req)
828{
829 struct crypto_aead *aead = crypto_aead_reqtfm(req);
830 int ivsize = crypto_aead_ivsize(aead);
831
832 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
812 if (qm_sg_bytes)
813 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
814}
815
816static void aead_unmap(struct device *dev,
817 struct aead_edesc *edesc,
818 struct aead_request *req)
819{
820 struct crypto_aead *aead = crypto_aead_reqtfm(req);
821 int ivsize = crypto_aead_ivsize(aead);
822
823 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
833 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
834 edesc->qm_sg_dma, edesc->qm_sg_bytes);
824 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
835 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
836}
837
825 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
826}
827
838static void ablkcipher_unmap(struct device *dev,
839 struct ablkcipher_edesc *edesc,
840 struct ablkcipher_request *req)
828static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
829 struct skcipher_request *req)
841{
830{
842 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
843 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
831 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
832 int ivsize = crypto_skcipher_ivsize(skcipher);
844
845 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
833
834 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
846 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
847 edesc->qm_sg_dma, edesc->qm_sg_bytes);
835 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
848}
849
850static void aead_done(struct caam_drv_req *drv_req, u32 status)
851{
852 struct device *qidev;
853 struct aead_edesc *edesc;
854 struct aead_request *aead_req = drv_req->app_ctx;
855 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);

--- 41 unchanged lines hidden ---

897 struct aead_edesc *edesc;
898 dma_addr_t qm_sg_dma, iv_dma = 0;
899 int ivsize = 0;
900 unsigned int authsize = ctx->authsize;
901 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
902 int in_len, out_len;
903 struct qm_sg_entry *sg_table, *fd_sgt;
904 struct caam_drv_ctx *drv_ctx;
836}
837
838static void aead_done(struct caam_drv_req *drv_req, u32 status)
839{
840 struct device *qidev;
841 struct aead_edesc *edesc;
842 struct aead_request *aead_req = drv_req->app_ctx;
843 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);

--- 41 unchanged lines hidden ---

885 struct aead_edesc *edesc;
886 dma_addr_t qm_sg_dma, iv_dma = 0;
887 int ivsize = 0;
888 unsigned int authsize = ctx->authsize;
889 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
890 int in_len, out_len;
891 struct qm_sg_entry *sg_table, *fd_sgt;
892 struct caam_drv_ctx *drv_ctx;
905 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
906
893
907 drv_ctx = get_drv_ctx(ctx, op_type);
894 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
908 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
909 return (struct aead_edesc *)drv_ctx;
910
911 /* allocate space for base edesc and hw desc commands, link tables */
912 edesc = qi_cache_alloc(GFP_DMA | flags);
913 if (unlikely(!edesc)) {
914 dev_err(qidev, "could not allocate extended descriptor\n");
915 return ERR_PTR(-ENOMEM);

--- 73 unchanged lines hidden ---

989 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
990 sg_table = &edesc->sgt[0];
991 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
992 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
993 CAAM_QI_MEMCACHE_SIZE)) {
994 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
995 qm_sg_ents, ivsize);
996 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
895 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
896 return (struct aead_edesc *)drv_ctx;
897
898 /* allocate space for base edesc and hw desc commands, link tables */
899 edesc = qi_cache_alloc(GFP_DMA | flags);
900 if (unlikely(!edesc)) {
901 dev_err(qidev, "could not allocate extended descriptor\n");
902 return ERR_PTR(-ENOMEM);

--- 73 unchanged lines hidden ---

976 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
977 sg_table = &edesc->sgt[0];
978 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
979 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
980 CAAM_QI_MEMCACHE_SIZE)) {
981 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
982 qm_sg_ents, ivsize);
983 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
997 0, 0, 0, 0);
984 0, 0, 0);
998 qi_cache_free(edesc);
999 return ERR_PTR(-ENOMEM);
1000 }
1001
1002 if (ivsize) {
1003 u8 *iv = (u8 *)(sg_table + qm_sg_ents);
1004
1005 /* Make sure IV is located in a DMAable area */
1006 memcpy(iv, req->iv, ivsize);
1007
1008 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1009 if (dma_mapping_error(qidev, iv_dma)) {
1010 dev_err(qidev, "unable to map IV\n");
1011 caam_unmap(qidev, req->src, req->dst, src_nents,
985 qi_cache_free(edesc);
986 return ERR_PTR(-ENOMEM);
987 }
988
989 if (ivsize) {
990 u8 *iv = (u8 *)(sg_table + qm_sg_ents);
991
992 /* Make sure IV is located in a DMAable area */
993 memcpy(iv, req->iv, ivsize);
994
995 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
996 if (dma_mapping_error(qidev, iv_dma)) {
997 dev_err(qidev, "unable to map IV\n");
998 caam_unmap(qidev, req->src, req->dst, src_nents,
1012 dst_nents, 0, 0, 0, 0, 0);
999 dst_nents, 0, 0, 0, 0);
1013 qi_cache_free(edesc);
1014 return ERR_PTR(-ENOMEM);
1015 }
1016 }
1017
1018 edesc->src_nents = src_nents;
1019 edesc->dst_nents = dst_nents;
1020 edesc->iv_dma = iv_dma;
1021 edesc->drv_req.app_ctx = req;
1022 edesc->drv_req.cbk = aead_done;
1023 edesc->drv_req.drv_ctx = drv_ctx;
1024
1025 edesc->assoclen = cpu_to_caam32(req->assoclen);
1026 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1027 DMA_TO_DEVICE);
1028 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1029 dev_err(qidev, "unable to map assoclen\n");
1030 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1000 qi_cache_free(edesc);
1001 return ERR_PTR(-ENOMEM);
1002 }
1003 }
1004
1005 edesc->src_nents = src_nents;
1006 edesc->dst_nents = dst_nents;
1007 edesc->iv_dma = iv_dma;
1008 edesc->drv_req.app_ctx = req;
1009 edesc->drv_req.cbk = aead_done;
1010 edesc->drv_req.drv_ctx = drv_ctx;
1011
1012 edesc->assoclen = cpu_to_caam32(req->assoclen);
1013 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1014 DMA_TO_DEVICE);
1015 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1016 dev_err(qidev, "unable to map assoclen\n");
1017 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1031 iv_dma, ivsize, op_type, 0, 0);
1018 iv_dma, ivsize, 0, 0);
1032 qi_cache_free(edesc);
1033 return ERR_PTR(-ENOMEM);
1034 }
1035
1036 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
1037 qm_sg_index++;
1038 if (ivsize) {
1039 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);

--- 6 unchanged lines hidden ---

1046 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1047 qm_sg_index, 0);
1048
1049 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1050 if (dma_mapping_error(qidev, qm_sg_dma)) {
1051 dev_err(qidev, "unable to map S/G table\n");
1052 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1053 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1019 qi_cache_free(edesc);
1020 return ERR_PTR(-ENOMEM);
1021 }
1022
1023 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
1024 qm_sg_index++;
1025 if (ivsize) {
1026 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);

--- 6 unchanged lines hidden ---

1033 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1034 qm_sg_index, 0);
1035
1036 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1037 if (dma_mapping_error(qidev, qm_sg_dma)) {
1038 dev_err(qidev, "unable to map S/G table\n");
1039 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1040 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1054 iv_dma, ivsize, op_type, 0, 0);
1041 iv_dma, ivsize, 0, 0);
1055 qi_cache_free(edesc);
1056 return ERR_PTR(-ENOMEM);
1057 }
1058
1059 edesc->qm_sg_dma = qm_sg_dma;
1060 edesc->qm_sg_bytes = qm_sg_bytes;
1061
1062 out_len = req->assoclen + req->cryptlen +

--- 70 unchanged lines hidden ---

1133static int ipsec_gcm_decrypt(struct aead_request *req)
1134{
1135 if (req->assoclen < 8)
1136 return -EINVAL;
1137
1138 return aead_crypt(req, false);
1139}
1140
1042 qi_cache_free(edesc);
1043 return ERR_PTR(-ENOMEM);
1044 }
1045
1046 edesc->qm_sg_dma = qm_sg_dma;
1047 edesc->qm_sg_bytes = qm_sg_bytes;
1048
1049 out_len = req->assoclen + req->cryptlen +

--- 70 unchanged lines hidden ---

1120static int ipsec_gcm_decrypt(struct aead_request *req)
1121{
1122 if (req->assoclen < 8)
1123 return -EINVAL;
1124
1125 return aead_crypt(req, false);
1126}
1127
1141static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1128static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
1142{
1129{
1143 struct ablkcipher_edesc *edesc;
1144 struct ablkcipher_request *req = drv_req->app_ctx;
1145 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1146 struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
1130 struct skcipher_edesc *edesc;
1131 struct skcipher_request *req = drv_req->app_ctx;
1132 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1133 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
1147 struct device *qidev = caam_ctx->qidev;
1134 struct device *qidev = caam_ctx->qidev;
1148 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1135 int ivsize = crypto_skcipher_ivsize(skcipher);
1149
1150#ifdef DEBUG
1151 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1152#endif
1153
1154 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1155
1156 if (status)
1157 caam_jr_strstatus(qidev, status);
1158
1159#ifdef DEBUG
1160 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
1136
1137#ifdef DEBUG
1138 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1139#endif
1140
1141 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1142
1143 if (status)
1144 caam_jr_strstatus(qidev, status);
1145
1146#ifdef DEBUG
1147 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
1161 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1148 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1162 edesc->src_nents > 1 ? 100 : ivsize, 1);
1163 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1164 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1149 edesc->src_nents > 1 ? 100 : ivsize, 1);
1150 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1151 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1165 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1152 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1166#endif
1167
1153#endif
1154
1168 ablkcipher_unmap(qidev, edesc, req);
1155 skcipher_unmap(qidev, edesc, req);
1169
1156
1170 /* In case initial IV was generated, copy it in GIVCIPHER request */
1171 if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1172 u8 *iv;
1173 struct skcipher_givcrypt_request *greq;
1174
1175 greq = container_of(req, struct skcipher_givcrypt_request,
1176 creq);
1177 iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1178 memcpy(greq->giv, iv, ivsize);
1179 }
1180
1181 /*
1157 /*
1182 * The crypto API expects us to set the IV (req->info) to the last
1158 * The crypto API expects us to set the IV (req->iv) to the last
1183 * ciphertext block. This is used e.g. by the CTS mode.
1184 */
1159 * ciphertext block. This is used e.g. by the CTS mode.
1160 */
1185 if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
1186 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1161 if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
1162 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
1187 ivsize, ivsize, 0);
1188
1189 qi_cache_free(edesc);
1163 ivsize, ivsize, 0);
1164
1165 qi_cache_free(edesc);
1190 ablkcipher_request_complete(req, status);
1166 skcipher_request_complete(req, status);
1191}
1192
1167}
1168
1193static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1194 *req, bool encrypt)
1169static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1170 bool encrypt)
1195{
1171{
1196 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1197 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1172 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1173 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1198 struct device *qidev = ctx->qidev;
1199 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1200 GFP_KERNEL : GFP_ATOMIC;
1201 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1174 struct device *qidev = ctx->qidev;
1175 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1176 GFP_KERNEL : GFP_ATOMIC;
1177 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1202 struct ablkcipher_edesc *edesc;
1178 struct skcipher_edesc *edesc;
1203 dma_addr_t iv_dma;
1204 u8 *iv;
1179 dma_addr_t iv_dma;
1180 u8 *iv;
1205 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1181 int ivsize = crypto_skcipher_ivsize(skcipher);
1206 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1207 struct qm_sg_entry *sg_table, *fd_sgt;
1208 struct caam_drv_ctx *drv_ctx;
1182 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1183 struct qm_sg_entry *sg_table, *fd_sgt;
1184 struct caam_drv_ctx *drv_ctx;
1209 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
1210
1185
1211 drv_ctx = get_drv_ctx(ctx, op_type);
1186 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
1212 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1187 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1213 return (struct ablkcipher_edesc *)drv_ctx;
1188 return (struct skcipher_edesc *)drv_ctx;
1214
1189
1215 src_nents = sg_nents_for_len(req->src, req->nbytes);
1190 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1216 if (unlikely(src_nents < 0)) {
1217 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1191 if (unlikely(src_nents < 0)) {
1192 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1218 req->nbytes);
1193 req->cryptlen);
1219 return ERR_PTR(src_nents);
1220 }
1221
1222 if (unlikely(req->src != req->dst)) {
1194 return ERR_PTR(src_nents);
1195 }
1196
1197 if (unlikely(req->src != req->dst)) {
1223 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1198 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1224 if (unlikely(dst_nents < 0)) {
1225 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1199 if (unlikely(dst_nents < 0)) {
1200 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1226 req->nbytes);
1201 req->cryptlen);
1227 return ERR_PTR(dst_nents);
1228 }
1229
1230 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1231 DMA_TO_DEVICE);
1232 if (unlikely(!mapped_src_nents)) {
1233 dev_err(qidev, "unable to map source\n");
1234 return ERR_PTR(-ENOMEM);

--- 15 unchanged lines hidden ---

1250 }
1251 }
1252
1253 qm_sg_ents = 1 + mapped_src_nents;
1254 dst_sg_idx = qm_sg_ents;
1255
1256 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1257 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1202 return ERR_PTR(dst_nents);
1203 }
1204
1205 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1206 DMA_TO_DEVICE);
1207 if (unlikely(!mapped_src_nents)) {
1208 dev_err(qidev, "unable to map source\n");
1209 return ERR_PTR(-ENOMEM);

--- 15 unchanged lines hidden ---

1225 }
1226 }
1227
1228 qm_sg_ents = 1 + mapped_src_nents;
1229 dst_sg_idx = qm_sg_ents;
1230
1231 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1232 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1258 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1233 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1259 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1260 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1261 qm_sg_ents, ivsize);
1262 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1234 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1235 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1236 qm_sg_ents, ivsize);
1237 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1263 0, 0, 0, 0);
1238 0, 0, 0);
1264 return ERR_PTR(-ENOMEM);
1265 }
1266
1267 /* allocate space for base edesc, link tables and IV */
1268 edesc = qi_cache_alloc(GFP_DMA | flags);
1269 if (unlikely(!edesc)) {
1270 dev_err(qidev, "could not allocate extended descriptor\n");
1271 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1239 return ERR_PTR(-ENOMEM);
1240 }
1241
1242 /* allocate space for base edesc, link tables and IV */
1243 edesc = qi_cache_alloc(GFP_DMA | flags);
1244 if (unlikely(!edesc)) {
1245 dev_err(qidev, "could not allocate extended descriptor\n");
1246 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1272 0, 0, 0, 0);
1247 0, 0, 0);
1273 return ERR_PTR(-ENOMEM);
1274 }
1275
1276 /* Make sure IV is located in a DMAable area */
1277 sg_table = &edesc->sgt[0];
1278 iv = (u8 *)(sg_table + qm_sg_ents);
1248 return ERR_PTR(-ENOMEM);
1249 }
1250
1251 /* Make sure IV is located in a DMAable area */
1252 sg_table = &edesc->sgt[0];
1253 iv = (u8 *)(sg_table + qm_sg_ents);
1279 memcpy(iv, req->info, ivsize);
1254 memcpy(iv, req->iv, ivsize);
1280
1281 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1282 if (dma_mapping_error(qidev, iv_dma)) {
1283 dev_err(qidev, "unable to map IV\n");
1284 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1255
1256 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1257 if (dma_mapping_error(qidev, iv_dma)) {
1258 dev_err(qidev, "unable to map IV\n");
1259 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1285 0, 0, 0, 0);
1260 0, 0, 0);
1286 qi_cache_free(edesc);
1287 return ERR_PTR(-ENOMEM);
1288 }
1289
1290 edesc->src_nents = src_nents;
1291 edesc->dst_nents = dst_nents;
1292 edesc->iv_dma = iv_dma;
1293 edesc->qm_sg_bytes = qm_sg_bytes;
1294 edesc->drv_req.app_ctx = req;
1261 qi_cache_free(edesc);
1262 return ERR_PTR(-ENOMEM);
1263 }
1264
1265 edesc->src_nents = src_nents;
1266 edesc->dst_nents = dst_nents;
1267 edesc->iv_dma = iv_dma;
1268 edesc->qm_sg_bytes = qm_sg_bytes;
1269 edesc->drv_req.app_ctx = req;
1295 edesc->drv_req.cbk = ablkcipher_done;
1270 edesc->drv_req.cbk = skcipher_done;
1296 edesc->drv_req.drv_ctx = drv_ctx;
1297
1298 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1299 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1300
1301 if (mapped_dst_nents > 1)
1302 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1303 dst_sg_idx, 0);
1304
1305 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1306 DMA_TO_DEVICE);
1307 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1308 dev_err(qidev, "unable to map S/G table\n");
1309 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1271 edesc->drv_req.drv_ctx = drv_ctx;
1272
1273 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1274 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1275
1276 if (mapped_dst_nents > 1)
1277 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1278 dst_sg_idx, 0);
1279
1280 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1281 DMA_TO_DEVICE);
1282 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1283 dev_err(qidev, "unable to map S/G table\n");
1284 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1310 iv_dma, ivsize, op_type, 0, 0);
1285 iv_dma, ivsize, 0, 0);
1311 qi_cache_free(edesc);
1312 return ERR_PTR(-ENOMEM);
1313 }
1314
1315 fd_sgt = &edesc->drv_req.fd_sgt[0];
1316
1317 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1286 qi_cache_free(edesc);
1287 return ERR_PTR(-ENOMEM);
1288 }
1289
1290 fd_sgt = &edesc->drv_req.fd_sgt[0];
1291
1292 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1318 ivsize + req->nbytes, 0);
1293 ivsize + req->cryptlen, 0);
1319
1320 if (req->src == req->dst) {
1321 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1294
1295 if (req->src == req->dst) {
1296 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1322 sizeof(*sg_table), req->nbytes, 0);
1297 sizeof(*sg_table), req->cryptlen, 0);
1323 } else if (mapped_dst_nents > 1) {
1324 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1298 } else if (mapped_dst_nents > 1) {
1299 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1325 sizeof(*sg_table), req->nbytes, 0);
1300 sizeof(*sg_table), req->cryptlen, 0);
1326 } else {
1327 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1301 } else {
1302 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1328 req->nbytes, 0);
1303 req->cryptlen, 0);
1329 }
1330
1331 return edesc;
1332}
1333
1304 }
1305
1306 return edesc;
1307}
1308
1334static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1335 struct skcipher_givcrypt_request *creq)
1309static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1336{
1310{
1337 struct ablkcipher_request *req = &creq->creq;
1338 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1339 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1340 struct device *qidev = ctx->qidev;
1341 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1342 GFP_KERNEL : GFP_ATOMIC;
1343 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1344 struct ablkcipher_edesc *edesc;
1345 dma_addr_t iv_dma;
1346 u8 *iv;
1347 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1348 struct qm_sg_entry *sg_table, *fd_sgt;
1349 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1350 struct caam_drv_ctx *drv_ctx;
1351
1352 drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1353 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1354 return (struct ablkcipher_edesc *)drv_ctx;
1355
1356 src_nents = sg_nents_for_len(req->src, req->nbytes);
1357 if (unlikely(src_nents < 0)) {
1358 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1359 req->nbytes);
1360 return ERR_PTR(src_nents);
1361 }
1362
1363 if (unlikely(req->src != req->dst)) {
1364 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1365 if (unlikely(dst_nents < 0)) {
1366 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1367 req->nbytes);
1368 return ERR_PTR(dst_nents);
1369 }
1370
1371 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1372 DMA_TO_DEVICE);
1373 if (unlikely(!mapped_src_nents)) {
1374 dev_err(qidev, "unable to map source\n");
1375 return ERR_PTR(-ENOMEM);
1376 }
1377
1378 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1379 DMA_FROM_DEVICE);
1380 if (unlikely(!mapped_dst_nents)) {
1381 dev_err(qidev, "unable to map destination\n");
1382 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1383 return ERR_PTR(-ENOMEM);
1384 }
1385 } else {
1386 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1387 DMA_BIDIRECTIONAL);
1388 if (unlikely(!mapped_src_nents)) {
1389 dev_err(qidev, "unable to map source\n");
1390 return ERR_PTR(-ENOMEM);
1391 }
1392
1393 dst_nents = src_nents;
1394 mapped_dst_nents = src_nents;
1395 }
1396
1397 qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1398 dst_sg_idx = qm_sg_ents;
1399
1400 qm_sg_ents += 1 + mapped_dst_nents;
1401 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1402 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1403 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1404 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1405 qm_sg_ents, ivsize);
1406 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1407 0, 0, 0, 0);
1408 return ERR_PTR(-ENOMEM);
1409 }
1410
1411 /* allocate space for base edesc, link tables and IV */
1412 edesc = qi_cache_alloc(GFP_DMA | flags);
1413 if (!edesc) {
1414 dev_err(qidev, "could not allocate extended descriptor\n");
1415 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1416 0, 0, 0, 0);
1417 return ERR_PTR(-ENOMEM);
1418 }
1419
1420 /* Make sure IV is located in a DMAable area */
1421 sg_table = &edesc->sgt[0];
1422 iv = (u8 *)(sg_table + qm_sg_ents);
1423 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1424 if (dma_mapping_error(qidev, iv_dma)) {
1425 dev_err(qidev, "unable to map IV\n");
1426 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1427 0, 0, 0, 0);
1428 qi_cache_free(edesc);
1429 return ERR_PTR(-ENOMEM);
1430 }
1431
1432 edesc->src_nents = src_nents;
1433 edesc->dst_nents = dst_nents;
1434 edesc->iv_dma = iv_dma;
1435 edesc->qm_sg_bytes = qm_sg_bytes;
1436 edesc->drv_req.app_ctx = req;
1437 edesc->drv_req.cbk = ablkcipher_done;
1438 edesc->drv_req.drv_ctx = drv_ctx;
1439
1440 if (mapped_src_nents > 1)
1441 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1442
1443 dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1444 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1445 0);
1446
1447 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1448 DMA_TO_DEVICE);
1449 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1450 dev_err(qidev, "unable to map S/G table\n");
1451 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1452 iv_dma, ivsize, GIVENCRYPT, 0, 0);
1453 qi_cache_free(edesc);
1454 return ERR_PTR(-ENOMEM);
1455 }
1456
1457 fd_sgt = &edesc->drv_req.fd_sgt[0];
1458
1459 if (mapped_src_nents > 1)
1460 dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1461 0);
1462 else
1463 dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1464 req->nbytes, 0);
1465
1466 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1467 sizeof(*sg_table), ivsize + req->nbytes, 0);
1468
1469 return edesc;
1470}
1471
1472static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1473{
1474 struct ablkcipher_edesc *edesc;
1475 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1476 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1477 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1311 struct skcipher_edesc *edesc;
1312 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1313 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1314 int ivsize = crypto_skcipher_ivsize(skcipher);
1478 int ret;
1479
1480 if (unlikely(caam_congested))
1481 return -EAGAIN;
1482
1483 /* allocate extended descriptor */
1315 int ret;
1316
1317 if (unlikely(caam_congested))
1318 return -EAGAIN;
1319
1320 /* allocate extended descriptor */
1484 edesc = ablkcipher_edesc_alloc(req, encrypt);
1321 edesc = skcipher_edesc_alloc(req, encrypt);
1485 if (IS_ERR(edesc))
1486 return PTR_ERR(edesc);
1487
1488 /*
1322 if (IS_ERR(edesc))
1323 return PTR_ERR(edesc);
1324
1325 /*
1489 * The crypto API expects us to set the IV (req->info) to the last
1326 * The crypto API expects us to set the IV (req->iv) to the last
1490 * ciphertext block.
1491 */
1492 if (!encrypt)
1327 * ciphertext block.
1328 */
1329 if (!encrypt)
1493 scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1330 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
1494 ivsize, ivsize, 0);
1495
1496 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1497 if (!ret) {
1498 ret = -EINPROGRESS;
1499 } else {
1331 ivsize, ivsize, 0);
1332
1333 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1334 if (!ret) {
1335 ret = -EINPROGRESS;
1336 } else {
1500 ablkcipher_unmap(ctx->qidev, edesc, req);
1337 skcipher_unmap(ctx->qidev, edesc, req);
1501 qi_cache_free(edesc);
1502 }
1503
1504 return ret;
1505}
1506
1338 qi_cache_free(edesc);
1339 }
1340
1341 return ret;
1342}
1343
1507static int ablkcipher_encrypt(struct ablkcipher_request *req)
1344static int skcipher_encrypt(struct skcipher_request *req)
1508{
1345{
1509 return ablkcipher_crypt(req, true);
1346 return skcipher_crypt(req, true);
1510}
1511
1347}
1348
1512static int ablkcipher_decrypt(struct ablkcipher_request *req)
1349static int skcipher_decrypt(struct skcipher_request *req)
1513{
1350{
1514 return ablkcipher_crypt(req, false);
1351 return skcipher_crypt(req, false);
1515}
1516
1352}
1353
1517static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1518{
1519 struct ablkcipher_request *req = &creq->creq;
1520 struct ablkcipher_edesc *edesc;
1521 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1522 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1523 int ret;
1524
1525 if (unlikely(caam_congested))
1526 return -EAGAIN;
1527
1528 /* allocate extended descriptor */
1529 edesc = ablkcipher_giv_edesc_alloc(creq);
1530 if (IS_ERR(edesc))
1531 return PTR_ERR(edesc);
1532
1533 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1534 if (!ret) {
1535 ret = -EINPROGRESS;
1536 } else {
1537 ablkcipher_unmap(ctx->qidev, edesc, req);
1538 qi_cache_free(edesc);
1539 }
1540
1541 return ret;
1542}
1543
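
The ablkcipher_givencrypt() entry point removed above has no skcipher counterpart: in the skcipher API the IV is always supplied by the caller. The sketch below is hypothetical (not part of either revision) and only illustrates what a former givencrypt user would do instead.

#include <linux/random.h>
#include <crypto/skcipher.h>

/* Hypothetical helper -- the caller now generates the IV itself. */
static void example_set_random_iv(struct skcipher_request *req,
				  struct scatterlist *src,
				  struct scatterlist *dst,
				  unsigned int nbytes, u8 *iv,
				  unsigned int ivsize)
{
	get_random_bytes(iv, ivsize);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
}
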
1544#define template_ablkcipher template_u.ablkcipher
1545struct caam_alg_template {
1546 char name[CRYPTO_MAX_ALG_NAME];
1547 char driver_name[CRYPTO_MAX_ALG_NAME];
1548 unsigned int blocksize;
1549 u32 type;
1550 union {
1551 struct ablkcipher_alg ablkcipher;
1552 } template_u;
1553 u32 class1_alg_type;
1554 u32 class2_alg_type;
1555};
1556
1557static struct caam_alg_template driver_algs[] = {
1558 /* ablkcipher descriptor */
1354static struct caam_skcipher_alg driver_algs[] = {
1559 {
1355 {
1560 .name = "cbc(aes)",
1561 .driver_name = "cbc-aes-caam-qi",
1562 .blocksize = AES_BLOCK_SIZE,
1563 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1564 .template_ablkcipher = {
1565 .setkey = ablkcipher_setkey,
1566 .encrypt = ablkcipher_encrypt,
1567 .decrypt = ablkcipher_decrypt,
1568 .givencrypt = ablkcipher_givencrypt,
1569 .geniv = "<built-in>",
1356 .skcipher = {
1357 .base = {
1358 .cra_name = "cbc(aes)",
1359 .cra_driver_name = "cbc-aes-caam-qi",
1360 .cra_blocksize = AES_BLOCK_SIZE,
1361 },
1362 .setkey = skcipher_setkey,
1363 .encrypt = skcipher_encrypt,
1364 .decrypt = skcipher_decrypt,
1570 .min_keysize = AES_MIN_KEY_SIZE,
1571 .max_keysize = AES_MAX_KEY_SIZE,
1572 .ivsize = AES_BLOCK_SIZE,
1573 },
1365 .min_keysize = AES_MIN_KEY_SIZE,
1366 .max_keysize = AES_MAX_KEY_SIZE,
1367 .ivsize = AES_BLOCK_SIZE,
1368 },
1574 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1369 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1575 },
1576 {
1370 },
1371 {
1577 .name = "cbc(des3_ede)",
1578 .driver_name = "cbc-3des-caam-qi",
1579 .blocksize = DES3_EDE_BLOCK_SIZE,
1580 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1581 .template_ablkcipher = {
1582 .setkey = ablkcipher_setkey,
1583 .encrypt = ablkcipher_encrypt,
1584 .decrypt = ablkcipher_decrypt,
1585 .givencrypt = ablkcipher_givencrypt,
1586 .geniv = "<built-in>",
1372 .skcipher = {
1373 .base = {
1374 .cra_name = "cbc(des3_ede)",
1375 .cra_driver_name = "cbc-3des-caam-qi",
1376 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1377 },
1378 .setkey = skcipher_setkey,
1379 .encrypt = skcipher_encrypt,
1380 .decrypt = skcipher_decrypt,
1587 .min_keysize = DES3_EDE_KEY_SIZE,
1588 .max_keysize = DES3_EDE_KEY_SIZE,
1589 .ivsize = DES3_EDE_BLOCK_SIZE,
1590 },
1381 .min_keysize = DES3_EDE_KEY_SIZE,
1382 .max_keysize = DES3_EDE_KEY_SIZE,
1383 .ivsize = DES3_EDE_BLOCK_SIZE,
1384 },
1591 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1385 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1592 },
1593 {
1386 },
1387 {
1594 .name = "cbc(des)",
1595 .driver_name = "cbc-des-caam-qi",
1596 .blocksize = DES_BLOCK_SIZE,
1597 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1598 .template_ablkcipher = {
1599 .setkey = ablkcipher_setkey,
1600 .encrypt = ablkcipher_encrypt,
1601 .decrypt = ablkcipher_decrypt,
1602 .givencrypt = ablkcipher_givencrypt,
1603 .geniv = "<built-in>",
1388 .skcipher = {
1389 .base = {
1390 .cra_name = "cbc(des)",
1391 .cra_driver_name = "cbc-des-caam-qi",
1392 .cra_blocksize = DES_BLOCK_SIZE,
1393 },
1394 .setkey = skcipher_setkey,
1395 .encrypt = skcipher_encrypt,
1396 .decrypt = skcipher_decrypt,
1604 .min_keysize = DES_KEY_SIZE,
1605 .max_keysize = DES_KEY_SIZE,
1606 .ivsize = DES_BLOCK_SIZE,
1607 },
1397 .min_keysize = DES_KEY_SIZE,
1398 .max_keysize = DES_KEY_SIZE,
1399 .ivsize = DES_BLOCK_SIZE,
1400 },
1608 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1401 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1609 },
1610 {
1402 },
1403 {
1611 .name = "ctr(aes)",
1612 .driver_name = "ctr-aes-caam-qi",
1613 .blocksize = 1,
1614 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1615 .template_ablkcipher = {
1616 .setkey = ablkcipher_setkey,
1617 .encrypt = ablkcipher_encrypt,
1618 .decrypt = ablkcipher_decrypt,
1619 .geniv = "chainiv",
1404 .skcipher = {
1405 .base = {
1406 .cra_name = "ctr(aes)",
1407 .cra_driver_name = "ctr-aes-caam-qi",
1408 .cra_blocksize = 1,
1409 },
1410 .setkey = skcipher_setkey,
1411 .encrypt = skcipher_encrypt,
1412 .decrypt = skcipher_decrypt,
1620 .min_keysize = AES_MIN_KEY_SIZE,
1621 .max_keysize = AES_MAX_KEY_SIZE,
1622 .ivsize = AES_BLOCK_SIZE,
1413 .min_keysize = AES_MIN_KEY_SIZE,
1414 .max_keysize = AES_MAX_KEY_SIZE,
1415 .ivsize = AES_BLOCK_SIZE,
1416 .chunksize = AES_BLOCK_SIZE,
1623 },
1417 },
1624 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1418 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1419 OP_ALG_AAI_CTR_MOD128,
1625 },
1626 {
1420 },
1421 {
1627 .name = "rfc3686(ctr(aes))",
1628 .driver_name = "rfc3686-ctr-aes-caam-qi",
1629 .blocksize = 1,
1630 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1631 .template_ablkcipher = {
1632 .setkey = ablkcipher_setkey,
1633 .encrypt = ablkcipher_encrypt,
1634 .decrypt = ablkcipher_decrypt,
1635 .givencrypt = ablkcipher_givencrypt,
1636 .geniv = "<built-in>",
1422 .skcipher = {
1423 .base = {
1424 .cra_name = "rfc3686(ctr(aes))",
1425 .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
1426 .cra_blocksize = 1,
1427 },
1428 .setkey = skcipher_setkey,
1429 .encrypt = skcipher_encrypt,
1430 .decrypt = skcipher_decrypt,
1637 .min_keysize = AES_MIN_KEY_SIZE +
1638 CTR_RFC3686_NONCE_SIZE,
1639 .max_keysize = AES_MAX_KEY_SIZE +
1640 CTR_RFC3686_NONCE_SIZE,
1641 .ivsize = CTR_RFC3686_IV_SIZE,
1431 .min_keysize = AES_MIN_KEY_SIZE +
1432 CTR_RFC3686_NONCE_SIZE,
1433 .max_keysize = AES_MAX_KEY_SIZE +
1434 CTR_RFC3686_NONCE_SIZE,
1435 .ivsize = CTR_RFC3686_IV_SIZE,
1436 .chunksize = AES_BLOCK_SIZE,
1642 },
1437 },
1643 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1438 .caam = {
1439 .class1_alg_type = OP_ALG_ALGSEL_AES |
1440 OP_ALG_AAI_CTR_MOD128,
1441 .rfc3686 = true,
1442 },
1644 },
1645 {
1443 },
1444 {
1646 .name = "xts(aes)",
1647 .driver_name = "xts-aes-caam-qi",
1648 .blocksize = AES_BLOCK_SIZE,
1649 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1650 .template_ablkcipher = {
1651 .setkey = xts_ablkcipher_setkey,
1652 .encrypt = ablkcipher_encrypt,
1653 .decrypt = ablkcipher_decrypt,
1654 .geniv = "eseqiv",
1445 .skcipher = {
1446 .base = {
1447 .cra_name = "xts(aes)",
1448 .cra_driver_name = "xts-aes-caam-qi",
1449 .cra_blocksize = AES_BLOCK_SIZE,
1450 },
1451 .setkey = xts_skcipher_setkey,
1452 .encrypt = skcipher_encrypt,
1453 .decrypt = skcipher_decrypt,
1655 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1656 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1657 .ivsize = AES_BLOCK_SIZE,
1658 },
1454 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1455 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1456 .ivsize = AES_BLOCK_SIZE,
1457 },
1659 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1458 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1660 },
1661};
1662
1663static struct caam_aead_alg driver_aeads[] = {
1664 {
1665 .aead = {
1666 .base = {
1667 .cra_name = "rfc4106(gcm(aes))",

--- 855 unchanged lines hidden ---

2523 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2524 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2525 OP_ALG_AAI_HMAC_PRECOMP,
2526 .geniv = true,
2527 }
2528 },
2529};
2530
1459 },
1460};
1461
1462static struct caam_aead_alg driver_aeads[] = {
1463 {
1464 .aead = {
1465 .base = {
1466 .cra_name = "rfc4106(gcm(aes))",

--- 855 unchanged lines hidden ---

2322 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2323 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2324 OP_ALG_AAI_HMAC_PRECOMP,
2325 .geniv = true,
2326 }
2327 },
2328};
2329
2531struct caam_crypto_alg {
2532 struct list_head entry;
2533 struct crypto_alg crypto_alg;
2534 struct caam_alg_entry caam;
2535};
2536
2537static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2538 bool uses_dkp)
2539{
2540 struct caam_drv_private *priv;
2541
2542 /*
2543 * distribute tfms across job rings to ensure in-order
2544 * crypto request processing per tfm

--- 22 unchanged lines hidden ---

2567 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2568 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2569
2570 ctx->qidev = priv->qidev;
2571
2572 spin_lock_init(&ctx->lock);
2573 ctx->drv_ctx[ENCRYPT] = NULL;
2574 ctx->drv_ctx[DECRYPT] = NULL;
2330static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2331 bool uses_dkp)
2332{
2333 struct caam_drv_private *priv;
2334
2335 /*
2336 * distribute tfms across job rings to ensure in-order
2337 * crypto request processing per tfm

--- 22 unchanged lines hidden ---

2360 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2361 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2362
2363 ctx->qidev = priv->qidev;
2364
2365 spin_lock_init(&ctx->lock);
2366 ctx->drv_ctx[ENCRYPT] = NULL;
2367 ctx->drv_ctx[DECRYPT] = NULL;
2575 ctx->drv_ctx[GIVENCRYPT] = NULL;
2576
2577 return 0;
2578}
2579
2368
2369 return 0;
2370}
2371
2580static int caam_cra_init(struct crypto_tfm *tfm)
2372static int caam_cra_init(struct crypto_skcipher *tfm)
2581{
2373{
2582 struct crypto_alg *alg = tfm->__crt_alg;
2583 struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2584 crypto_alg);
2585 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2374 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
2375 struct caam_skcipher_alg *caam_alg =
2376 container_of(alg, typeof(*caam_alg), skcipher);
2586
2377
2587 return caam_init_common(ctx, &caam_alg->caam, false);
2378 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
2379 false);
2588}
2589
2590static int caam_aead_init(struct crypto_aead *tfm)
2591{
2592 struct aead_alg *alg = crypto_aead_alg(tfm);
2593 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2594 aead);
2595 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2596
2597 return caam_init_common(ctx, &caam_alg->caam,
2598 alg->setkey == aead_setkey);
2599}
2600
2601static void caam_exit_common(struct caam_ctx *ctx)
2602{
2603 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2604 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2380}
2381
2382static int caam_aead_init(struct crypto_aead *tfm)
2383{
2384 struct aead_alg *alg = crypto_aead_alg(tfm);
2385 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2386 aead);
2387 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2388
2389 return caam_init_common(ctx, &caam_alg->caam,
2390 alg->setkey == aead_setkey);
2391}
2392
2393static void caam_exit_common(struct caam_ctx *ctx)
2394{
2395 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2396 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2605 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2606
2607 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2608
2609 caam_jr_free(ctx->jrdev);
2610}
2611
2397
2398 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2399
2400 caam_jr_free(ctx->jrdev);
2401}
2402
2612static void caam_cra_exit(struct crypto_tfm *tfm)
2403static void caam_cra_exit(struct crypto_skcipher *tfm)
2613{
2404{
2614 caam_exit_common(crypto_tfm_ctx(tfm));
2405 caam_exit_common(crypto_skcipher_ctx(tfm));
2615}
2616
2617static void caam_aead_exit(struct crypto_aead *tfm)
2618{
2619 caam_exit_common(crypto_aead_ctx(tfm));
2620}
2621
2406}
2407
2408static void caam_aead_exit(struct crypto_aead *tfm)
2409{
2410 caam_exit_common(crypto_aead_ctx(tfm));
2411}
2412
2622static struct list_head alg_list;
2623static void __exit caam_qi_algapi_exit(void)
2624{
2413static void __exit caam_qi_algapi_exit(void)
2414{
2625 struct caam_crypto_alg *t_alg, *n;
2626 int i;
2627
2628 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2629 struct caam_aead_alg *t_alg = driver_aeads + i;
2630
2631 if (t_alg->registered)
2632 crypto_unregister_aead(&t_alg->aead);
2633 }
2634
2415 int i;
2416
2417 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2418 struct caam_aead_alg *t_alg = driver_aeads + i;
2419
2420 if (t_alg->registered)
2421 crypto_unregister_aead(&t_alg->aead);
2422 }
2423
2635 if (!alg_list.next)
2636 return;
2424 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2425 struct caam_skcipher_alg *t_alg = driver_algs + i;
2637
2426
2638 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2639 crypto_unregister_alg(&t_alg->crypto_alg);
2640 list_del(&t_alg->entry);
2641 kfree(t_alg);
2427 if (t_alg->registered)
2428 crypto_unregister_skcipher(&t_alg->skcipher);
2642 }
2643}
2644
2429 }
2430}
2431
2645static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2646 *template)
2432static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2647{
2433{
2648 struct caam_crypto_alg *t_alg;
2649 struct crypto_alg *alg;
2434 struct skcipher_alg *alg = &t_alg->skcipher;
2650
2435
2651 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2652 if (!t_alg)
2653 return ERR_PTR(-ENOMEM);
2436 alg->base.cra_module = THIS_MODULE;
2437 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2438 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2439 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2654
2440
2655 alg = &t_alg->crypto_alg;
2656
2657 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2658 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2659 template->driver_name);
2660 alg->cra_module = THIS_MODULE;
2661 alg->cra_init = caam_cra_init;
2662 alg->cra_exit = caam_cra_exit;
2663 alg->cra_priority = CAAM_CRA_PRIORITY;
2664 alg->cra_blocksize = template->blocksize;
2665 alg->cra_alignmask = 0;
2666 alg->cra_ctxsize = sizeof(struct caam_ctx);
2667 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2668 template->type;
2669 switch (template->type) {
2670 case CRYPTO_ALG_TYPE_GIVCIPHER:
2671 alg->cra_type = &crypto_givcipher_type;
2672 alg->cra_ablkcipher = template->template_ablkcipher;
2673 break;
2674 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2675 alg->cra_type = &crypto_ablkcipher_type;
2676 alg->cra_ablkcipher = template->template_ablkcipher;
2677 break;
2678 }
2679
2680 t_alg->caam.class1_alg_type = template->class1_alg_type;
2681 t_alg->caam.class2_alg_type = template->class2_alg_type;
2682
2683 return t_alg;
2441 alg->init = caam_cra_init;
2442 alg->exit = caam_cra_exit;
2684}
2685
2686static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2687{
2688 struct aead_alg *alg = &t_alg->aead;
2689
2690 alg->base.cra_module = THIS_MODULE;
2691 alg->base.cra_priority = CAAM_CRA_PRIORITY;

--- 37 unchanged lines hidden ---

2729 if (!priv || !priv->qi_present)
2730 return -ENODEV;
2731
2732 if (caam_dpaa2) {
2733 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2734 return -ENODEV;
2735 }
2736
2443}
2444
2445static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2446{
2447 struct aead_alg *alg = &t_alg->aead;
2448
2449 alg->base.cra_module = THIS_MODULE;
2450 alg->base.cra_priority = CAAM_CRA_PRIORITY;

--- 37 unchanged lines hidden ---

2488 if (!priv || !priv->qi_present)
2489 return -ENODEV;
2490
2491 if (caam_dpaa2) {
2492 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2493 return -ENODEV;
2494 }
2495
2737 INIT_LIST_HEAD(&alg_list);
2738
2739 /*
2740 * Register crypto algorithms the device supports.
2741 * First, detect presence and attributes of DES, AES, and MD blocks.
2742 */
2743 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2744 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2745 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2746 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2747 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2748
2749 /* If MD is present, limit digest size based on LP256 */
2750 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2751 md_limit = SHA256_DIGEST_SIZE;
2752
2753 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2496 /*
2497 * Register crypto algorithms the device supports.
2498 * First, detect presence and attributes of DES, AES, and MD blocks.
2499 */
2500 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2501 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2502 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2503 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2504 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2505
2506 /* If MD is present, limit digest size based on LP256 */
2507 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2508 md_limit = SHA256_DIGEST_SIZE;
2509
2510 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2754 struct caam_crypto_alg *t_alg;
2755 struct caam_alg_template *alg = driver_algs + i;
2756 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2511 struct caam_skcipher_alg *t_alg = driver_algs + i;
2512 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
2757
2758 /* Skip DES algorithms if not supported by device */
2759 if (!des_inst &&
2760 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2761 (alg_sel == OP_ALG_ALGSEL_DES)))
2762 continue;
2763
2764 /* Skip AES algorithms if not supported by device */
2765 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2766 continue;
2767
2513
2514 /* Skip DES algorithms if not supported by device */
2515 if (!des_inst &&
2516 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2517 (alg_sel == OP_ALG_ALGSEL_DES)))
2518 continue;
2519
2520 /* Skip AES algorithms if not supported by device */
2521 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2522 continue;
2523
2768 t_alg = caam_alg_alloc(alg);
2769 if (IS_ERR(t_alg)) {
2770 err = PTR_ERR(t_alg);
2771 dev_warn(priv->qidev, "%s alg allocation failed\n",
2772 alg->driver_name);
2773 continue;
2774 }
2524 caam_skcipher_alg_init(t_alg);
2775
2525
2776 err = crypto_register_alg(&t_alg->crypto_alg);
2526 err = crypto_register_skcipher(&t_alg->skcipher);
2777 if (err) {
2778 dev_warn(priv->qidev, "%s alg registration failed\n",
2527 if (err) {
2528 dev_warn(priv->qidev, "%s alg registration failed\n",
2779 t_alg->crypto_alg.cra_driver_name);
2780 kfree(t_alg);
2529 t_alg->skcipher.base.cra_driver_name);
2781 continue;
2782 }
2783
2530 continue;
2531 }
2532
2784 list_add_tail(&t_alg->entry, &alg_list);
2533 t_alg->registered = true;
2785 registered = true;
2786 }
2787
2788 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2789 struct caam_aead_alg *t_alg = driver_aeads + i;
2790 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2791 OP_ALG_ALGSEL_MASK;
2792 u32 c2_alg_sel = t_alg->caam.class2_alg_type &

--- 54 unchanged lines hidden ---
2534 registered = true;
2535 }
2536
2537 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2538 struct caam_aead_alg *t_alg = driver_aeads + i;
2539 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2540 OP_ALG_ALGSEL_MASK;
2541 u32 c2_alg_sel = t_alg->caam.class2_alg_type &

--- 54 unchanged lines hidden ---