--- caamalg.c (90fb814b6c025da45f71db1703cffe3fe87f575c)
+++ caamalg.c (618b5dc48365cecc03daffa46800f20ab11e3f80)
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  *
  * Based on talitos crypto API driver.
  *
  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
  *
  * ---------------                     ---------------
  * | JobDesc #1  |-------------------->|  ShareDesc  |
  * | *(packet 1) |                     |   (PDB)     |
--- 62 unchanged lines hidden ---

 #ifdef DEBUG
 /* for print_hex_dumps with line references */
 #define debug(format, arg...) printk(format, arg)
 #else
 #define debug(format, arg...)
 #endif
 
-static struct list_head alg_list;
-
 struct caam_alg_entry {
         int class1_alg_type;
         int class2_alg_type;
         bool rfc3686;
         bool geniv;
 };
 
 struct caam_aead_alg {
         struct aead_alg aead;
         struct caam_alg_entry caam;
         bool registered;
 };
 
+struct caam_skcipher_alg {
+        struct skcipher_alg skcipher;
+        struct caam_alg_entry caam;
+        bool registered;
+};
+
 /*
  * per-session context
  */
 struct caam_ctx {
         u32 sh_desc_enc[DESC_MAX_USED_LEN];
         u32 sh_desc_dec[DESC_MAX_USED_LEN];
-        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
         u8 key[CAAM_MAX_KEY_SIZE];
         dma_addr_t sh_desc_enc_dma;
         dma_addr_t sh_desc_dec_dma;
-        dma_addr_t sh_desc_givenc_dma;
         dma_addr_t key_dma;
         enum dma_data_direction dir;
         struct device *jrdev;
         struct alginfo adata;
         struct alginfo cdata;
         unsigned int authsize;
 };
 
--- 525 unchanged lines hidden ---
          * in the nonce. Update the AES key length.
          */
         ctx->cdata.keylen = keylen - 4;
         dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
                                    ctx->dir);
         return rfc4543_set_sh_desc(aead);
 }
 
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-                             const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+                           unsigned int keylen)
 {
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-        const char *alg_name = crypto_tfm_alg_name(tfm);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+        struct caam_skcipher_alg *alg =
+                container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+                             skcipher);
         struct device *jrdev = ctx->jrdev;
-        unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
         u32 *desc;
         u32 ctx1_iv_off = 0;
         const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
                                OP_ALG_AAI_CTR_MOD128);
-        const bool is_rfc3686 = (ctr_mode &&
-                                 (strstr(alg_name, "rfc3686") != NULL));
+        const bool is_rfc3686 = alg->caam.rfc3686;
 
 #ifdef DEBUG
         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
         /*
          * AES-CTR needs to load IV in CONTEXT1 reg
          * at an offset of 128bits (16bytes)
--- 11 unchanged lines hidden ---
                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
                 keylen -= CTR_RFC3686_NONCE_SIZE;
         }
 
         ctx->cdata.keylen = keylen;
         ctx->cdata.key_virt = key;
         ctx->cdata.key_inline = true;
 
-        /* ablkcipher_encrypt shared descriptor */
+        /* skcipher_encrypt shared descriptor */
         desc = ctx->sh_desc_enc;
-        cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
-                                     ctx1_iv_off);
+        cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+                                   ctx1_iv_off);
         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
                                    desc_bytes(desc), ctx->dir);
 
-        /* ablkcipher_decrypt shared descriptor */
+        /* skcipher_decrypt shared descriptor */
         desc = ctx->sh_desc_dec;
-        cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
-                                     ctx1_iv_off);
+        cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+                                   ctx1_iv_off);
         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
                                    desc_bytes(desc), ctx->dir);
 
-        /* ablkcipher_givencrypt shared descriptor */
-        desc = ctx->sh_desc_givenc;
-        cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
-                                        ctx1_iv_off);
-        dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-                                   desc_bytes(desc), ctx->dir);
-
         return 0;
 }
 
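Note: the rfc3686 path in skcipher_setkey() above relies on the RFC 3686 convention that the trailing CTR_RFC3686_NONCE_SIZE (4) bytes of the key material are a per-key nonce, which is why keylen shrinks and the IV offset in CONTEXT1 moves past the nonce. A minimal sketch of that key-blob layout, in plain C rather than driver code (the helper name is hypothetical):

        #include <string.h>

        /* RFC 3686 key blob, as skcipher_setkey() receives it: AES key || nonce */
        static unsigned int rfc3686_pack_key(unsigned char *out,
                                             const unsigned char *aes_key,
                                             unsigned int aes_keylen, /* 16/24/32 */
                                             const unsigned char nonce[4])
        {
                memcpy(out, aes_key, aes_keylen);
                memcpy(out + aes_keylen, nonce, 4); /* CTR_RFC3686_NONCE_SIZE */
                return aes_keylen + 4;              /* keylen passed to setkey() */
        }
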
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-                                 const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+                               unsigned int keylen)
 {
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
         struct device *jrdev = ctx->jrdev;
         u32 *desc;
 
         if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-                crypto_ablkcipher_set_flags(ablkcipher,
-                                            CRYPTO_TFM_RES_BAD_KEY_LEN);
+                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                 dev_err(jrdev, "key size mismatch\n");
                 return -EINVAL;
         }
 
         ctx->cdata.keylen = keylen;
         ctx->cdata.key_virt = key;
         ctx->cdata.key_inline = true;
 
-        /* xts_ablkcipher_encrypt shared descriptor */
+        /* xts_skcipher_encrypt shared descriptor */
         desc = ctx->sh_desc_enc;
-        cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+        cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
                                    desc_bytes(desc), ctx->dir);
 
-        /* xts_ablkcipher_decrypt shared descriptor */
+        /* xts_skcipher_decrypt shared descriptor */
         desc = ctx->sh_desc_dec;
-        cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+        cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
                                    desc_bytes(desc), ctx->dir);
 
         return 0;
 }
 
 /*
  * aead_edesc - s/w-extended aead descriptor
--- 9 unchanged lines hidden ---
         int dst_nents;
         int sec4_sg_bytes;
         dma_addr_t sec4_sg_dma;
         struct sec4_sg_entry *sec4_sg;
         u32 hw_desc[];
 };
 
 /*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  *           and IV
  */
-struct ablkcipher_edesc {
+struct skcipher_edesc {
         int src_nents;
         int dst_nents;
         dma_addr_t iv_dma;
-        enum dma_data_direction iv_dir;
         int sec4_sg_bytes;
         dma_addr_t sec4_sg_dma;
         struct sec4_sg_entry *sec4_sg;
         u32 hw_desc[0];
 };
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
                        struct scatterlist *dst, int src_nents,
                        int dst_nents,
-                       dma_addr_t iv_dma, int ivsize,
-                       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+                       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                        int sec4_sg_bytes)
 {
         if (dst != src) {
                 if (src_nents)
                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
         } else {
                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
         }
 
         if (iv_dma)
-                dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
         if (sec4_sg_bytes)
                 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                  DMA_TO_DEVICE);
 }
 
 static void aead_unmap(struct device *dev,
                        struct aead_edesc *edesc,
                        struct aead_request *req)
 {
         caam_unmap(dev, req->src, req->dst,
-                   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+                   edesc->src_nents, edesc->dst_nents, 0, 0,
                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void ablkcipher_unmap(struct device *dev,
-                             struct ablkcipher_edesc *edesc,
-                             struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+                           struct skcipher_request *req)
 {
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
 
         caam_unmap(dev, req->src, req->dst,
                    edesc->src_nents, edesc->dst_nents,
-                   edesc->iv_dma, ivsize, edesc->iv_dir,
+                   edesc->iv_dma, ivsize,
                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
 {
         struct aead_request *req = context;
         struct aead_edesc *edesc;
--- 37 unchanged lines hidden ---
         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                 err = -EBADMSG;
 
         kfree(edesc);
 
         aead_request_complete(req, err);
 }
 
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                    void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+                                  void *context)
 {
-        struct ablkcipher_request *req = context;
-        struct ablkcipher_edesc *edesc;
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        struct skcipher_request *req = context;
+        struct skcipher_edesc *edesc;
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
 
 #ifdef DEBUG
         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-        edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+        edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
 
         if (err)
                 caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                        edesc->src_nents > 1 ? 100 : ivsize, 1);
 #endif
         caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
                      DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-                     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
-        ablkcipher_unmap(jrdev, edesc, req);
+        skcipher_unmap(jrdev, edesc, req);
 
         /*
-         * The crypto API expects us to set the IV (req->info) to the last
+         * The crypto API expects us to set the IV (req->iv) to the last
          * ciphertext block. This is used e.g. by the CTS mode.
          */
-        scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+        scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
                                  ivsize, 0);
 
-        /* In case initial IV was generated, copy it in GIVCIPHER request */
-        if (edesc->iv_dir == DMA_FROM_DEVICE) {
-                u8 *iv;
-                struct skcipher_givcrypt_request *greq;
-
-                greq = container_of(req, struct skcipher_givcrypt_request,
-                                    creq);
-                iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
-                     edesc->sec4_sg_bytes;
-                memcpy(greq->giv, iv, ivsize);
-        }
-
         kfree(edesc);
 
-        ablkcipher_request_complete(req, err);
+        skcipher_request_complete(req, err);
 }
 
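The scatterwalk_map_and_copy() above implements a general skcipher contract: on completion, req->iv must hold the last ciphertext block so a caller can chain requests. A short worked note on why that is enough, assuming CBC with ivsize equal to the cipher block size (illustrative, not taken from this file):

        /*
         * CBC: C[i] = E_k(P[i] ^ C[i-1]), with C[-1] = IV.
         *
         * If request #1 covers blocks 0..n and its completion stores C[n]
         * into req->iv, then request #2 starting from IV' = C[n] computes
         * C[n+1] = E_k(P[n+1] ^ C[n]), exactly what one request over the
         * concatenated plaintext would have produced.
         */
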
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                    void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+                                  void *context)
 {
-        struct ablkcipher_request *req = context;
-        struct ablkcipher_edesc *edesc;
+        struct skcipher_request *req = context;
+        struct skcipher_edesc *edesc;
 #ifdef DEBUG
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
 
         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-        edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+        edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
         if (err)
                 caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-                       ivsize, 1);
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
 #endif
         caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
                      DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-                     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
-        ablkcipher_unmap(jrdev, edesc, req);
+        skcipher_unmap(jrdev, edesc, req);
         kfree(edesc);
 
-        ablkcipher_request_complete(req, err);
+        skcipher_request_complete(req, err);
 }
 
 /*
  * Fill in aead job descriptor
  */
 static void init_aead_job(struct aead_request *req,
                           struct aead_edesc *edesc,
                           bool all_contig, bool encrypt)
--- 125 unchanged lines hidden ---
         if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
                 append_load_as_imm(desc, req->iv, ivsize,
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    (ivoffset << LDST_OFFSET_SHIFT));
 }
 
 /*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
  */
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
-                                struct ablkcipher_edesc *edesc,
-                                struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+                              struct skcipher_edesc *edesc,
+                              const bool encrypt)
 {
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
         u32 *desc = edesc->hw_desc;
+        u32 *sh_desc;
         u32 out_options = 0;
-        dma_addr_t dst_dma;
+        dma_addr_t dst_dma, ptr;
         int len;
 
 #ifdef DEBUG
         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-                       ivsize, 1);
-        pr_err("asked=%d, nbytes%d\n",
-               (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+        pr_err("asked=%d, cryptlen%d\n",
+               (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
 #endif
         caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
                      DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+                     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
 
+        sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+        ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
         len = desc_len(sh_desc);
         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-        append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+        append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
                           LDST_SGF);
 
         if (likely(req->src == req->dst)) {
                 dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
                 out_options = LDST_SGF;
         } else {
                 if (edesc->dst_nents == 1) {
                         dst_dma = sg_dma_address(req->dst);
                 } else {
                         dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
                                   sizeof(struct sec4_sg_entry);
                         out_options = LDST_SGF;
                 }
         }
-        append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+        append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
 }
 
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
-                                    struct ablkcipher_edesc *edesc,
-                                    struct ablkcipher_request *req)
-{
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-        u32 *desc = edesc->hw_desc;
-        u32 in_options;
-        dma_addr_t dst_dma, src_dma;
-        int len, sec4_sg_index = 0;
-
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-                       ivsize, 1);
-#endif
-        caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
-                     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
-        len = desc_len(sh_desc);
-        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
-        if (edesc->src_nents == 1) {
-                src_dma = sg_dma_address(req->src);
-                in_options = 0;
-        } else {
-                src_dma = edesc->sec4_sg_dma;
-                sec4_sg_index += edesc->src_nents;
-                in_options = LDST_SGF;
-        }
-        append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
-
-        dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
-                  sizeof(struct sec4_sg_entry);
-        append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
-}
-
 /*
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                            int desc_bytes, bool *all_contig_ptr,
                                            bool encrypt)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
--- 69 unchanged lines hidden ---
         sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
         /* allocate space for base edesc and hw desc commands, link tables */
         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
                         GFP_DMA | flags);
         if (!edesc) {
                 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                           0, DMA_NONE, 0, 0);
+                           0, 0, 0);
                 return ERR_PTR(-ENOMEM);
         }
 
         edesc->src_nents = src_nents;
         edesc->dst_nents = dst_nents;
         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                          desc_bytes;
         *all_contig_ptr = !(mapped_src_nents > 1);
--- 184 unchanged lines hidden ---
                 aead_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
 
         return ret;
 }
 
 /*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
  */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-                                                       *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+                                                   int desc_bytes)
 {
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
         struct device *jrdev = ctx->jrdev;
         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
         int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-        struct ablkcipher_edesc *edesc;
+        struct skcipher_edesc *edesc;
         dma_addr_t iv_dma;
         u8 *iv;
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
         int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
-        src_nents = sg_nents_for_len(req->src, req->nbytes);
+        src_nents = sg_nents_for_len(req->src, req->cryptlen);
         if (unlikely(src_nents < 0)) {
                 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-                        req->nbytes);
+                        req->cryptlen);
                 return ERR_PTR(src_nents);
         }
 
         if (req->dst != req->src) {
-                dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+                dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
                 if (unlikely(dst_nents < 0)) {
                         dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-                                req->nbytes);
+                                req->cryptlen);
                         return ERR_PTR(dst_nents);
                 }
         }
 
         if (likely(req->src == req->dst)) {
                 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
                                               DMA_BIDIRECTIONAL);
                 if (unlikely(!mapped_src_nents)) {
--- 25 unchanged lines hidden ---
         /*
          * allocate space for base edesc and hw desc commands, link tables, IV
          */
         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
                         GFP_DMA | flags);
         if (!edesc) {
                 dev_err(jrdev, "could not allocate extended descriptor\n");
                 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                           0, DMA_NONE, 0, 0);
+                           0, 0, 0);
                 return ERR_PTR(-ENOMEM);
         }
 
         edesc->src_nents = src_nents;
         edesc->dst_nents = dst_nents;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
                                                   desc_bytes);
-        edesc->iv_dir = DMA_TO_DEVICE;
 
         /* Make sure IV is located in a DMAable area */
         iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-        memcpy(iv, req->info, ivsize);
+        memcpy(iv, req->iv, ivsize);
 
         iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
         if (dma_mapping_error(jrdev, iv_dma)) {
                 dev_err(jrdev, "unable to map IV\n");
                 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                           0, DMA_NONE, 0, 0);
+                           0, 0, 0);
                 kfree(edesc);
                 return ERR_PTR(-ENOMEM);
         }
 
         dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
         sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
 
         if (mapped_dst_nents > 1) {
                 sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
                                    edesc->sec4_sg + dst_sg_idx, 0);
         }
 
         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                             sec4_sg_bytes, DMA_TO_DEVICE);
         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                 dev_err(jrdev, "unable to map S/G table\n");
                 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                           iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+                           iv_dma, ivsize, 0, 0);
                 kfree(edesc);
                 return ERR_PTR(-ENOMEM);
         }
 
         edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
-        print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+        print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
                        sec4_sg_bytes, 1);
 #endif
 
         return edesc;
 }
 
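For orientation, the link table built above has the following layout (inferred from the dma_to_sec4_sg_one()/sg_to_sec4_sg_last() calls here and from the destination arithmetic in init_skcipher_job(); a sketch, not authoritative documentation):

        /*
         * sec4_sg[0]               : IV (iv_dma)
         * sec4_sg[1..src_nents]    : source segments
         * sec4_sg[1 + src_nents..] : destination segments, only when
         *                            req->src != req->dst and dst_nents > 1
         *
         * In-place requests point the output at sec4_sg_dma +
         * sizeof(struct sec4_sg_entry), i.e. they skip the IV entry and
         * reuse the source entries.
         */
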
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
 {
-        struct ablkcipher_edesc *edesc;
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+        struct skcipher_edesc *edesc;
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
         struct device *jrdev = ctx->jrdev;
         u32 *desc;
         int ret = 0;
 
         /* allocate extended descriptor */
-        edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+        edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
         /* Create and submit job descriptor*/
-        init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+        init_skcipher_job(req, edesc, true);
 #ifdef DEBUG
-        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+        print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                        desc_bytes(edesc->hw_desc), 1);
 #endif
         desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+        ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
 
         if (!ret) {
                 ret = -EINPROGRESS;
         } else {
-                ablkcipher_unmap(jrdev, edesc, req);
+                skcipher_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
 
         return ret;
 }
 
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
 {
-        struct ablkcipher_edesc *edesc;
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        struct skcipher_edesc *edesc;
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+        int ivsize = crypto_skcipher_ivsize(skcipher);
         struct device *jrdev = ctx->jrdev;
         u32 *desc;
         int ret = 0;
 
         /* allocate extended descriptor */
-        edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+        edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
         /*
-         * The crypto API expects us to set the IV (req->info) to the last
+         * The crypto API expects us to set the IV (req->iv) to the last
          * ciphertext block.
          */
-        scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+        scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
                                  ivsize, 0);
 
         /* Create and submit job descriptor*/
-        init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+        init_skcipher_job(req, edesc, false);
         desc = edesc->hw_desc;
 #ifdef DEBUG
-        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+        print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                        desc_bytes(edesc->hw_desc), 1);
 #endif
 
-        ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+        ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
         if (!ret) {
                 ret = -EINPROGRESS;
         } else {
-                ablkcipher_unmap(jrdev, edesc, req);
+                skcipher_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
 
         return ret;
 }
 
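For context, both entry points above are reached through the generic kernel skcipher API. A minimal sketch of an in-kernel caller, assuming len is a multiple of the block size (the function name and error handling are invented; the crypto API calls are the standard ones):

        #include <crypto/skcipher.h>
        #include <linux/crypto.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>

        static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                           const u8 *key, unsigned int keylen,
                                           u8 *iv)
        {
                struct crypto_skcipher *tfm;
                struct skcipher_request *req;
                struct scatterlist sg;
                DECLARE_CRYPTO_WAIT(wait);
                int err;

                /* may resolve to "cbc-aes-caam" from driver_algs[] below */
                tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_skcipher_setkey(tfm, key, keylen); /* -> skcipher_setkey() */
                if (err)
                        goto out_tfm;

                req = skcipher_request_alloc(tfm, GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
                        goto out_tfm;
                }

                sg_init_one(&sg, buf, len);
                skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                              CRYPTO_TFM_REQ_MAY_SLEEP,
                                              crypto_req_done, &wait);
                skcipher_request_set_crypt(req, &sg, &sg, len, iv);

                /* -> skcipher_encrypt(), completed by skcipher_encrypt_done() */
                err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

                skcipher_request_free(req);
        out_tfm:
                crypto_free_skcipher(tfm);
                return err;
        }
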
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
-                                struct skcipher_givcrypt_request *greq,
-                                int desc_bytes)
-{
-        struct ablkcipher_request *req = &greq->creq;
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-        struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
-        int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
-        struct ablkcipher_edesc *edesc;
-        dma_addr_t iv_dma;
-        u8 *iv;
-        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-        int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
-        src_nents = sg_nents_for_len(req->src, req->nbytes);
-        if (unlikely(src_nents < 0)) {
-                dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-                        req->nbytes);
-                return ERR_PTR(src_nents);
-        }
-
-        if (likely(req->src == req->dst)) {
-                mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
-                                              DMA_BIDIRECTIONAL);
-                if (unlikely(!mapped_src_nents)) {
-                        dev_err(jrdev, "unable to map source\n");
-                        return ERR_PTR(-ENOMEM);
-                }
-
-                dst_nents = src_nents;
-                mapped_dst_nents = src_nents;
-        } else {
-                mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
-                                              DMA_TO_DEVICE);
-                if (unlikely(!mapped_src_nents)) {
-                        dev_err(jrdev, "unable to map source\n");
-                        return ERR_PTR(-ENOMEM);
-                }
-
-                dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-                if (unlikely(dst_nents < 0)) {
-                        dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-                                req->nbytes);
-                        return ERR_PTR(dst_nents);
-                }
-
-                mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
-                                              DMA_FROM_DEVICE);
-                if (unlikely(!mapped_dst_nents)) {
-                        dev_err(jrdev, "unable to map destination\n");
-                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-                        return ERR_PTR(-ENOMEM);
-                }
-        }
-
-        sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
-        dst_sg_idx = sec4_sg_ents;
-        sec4_sg_ents += 1 + mapped_dst_nents;
-
-        /*
-         * allocate space for base edesc and hw desc commands, link tables, IV
-         */
-        sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
-        edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
-                        GFP_DMA | flags);
-        if (!edesc) {
-                dev_err(jrdev, "could not allocate extended descriptor\n");
-                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                           0, DMA_NONE, 0, 0);
-                return ERR_PTR(-ENOMEM);
-        }
-
-        edesc->src_nents = src_nents;
-        edesc->dst_nents = dst_nents;
-        edesc->sec4_sg_bytes = sec4_sg_bytes;
-        edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
-                                                  desc_bytes);
-        edesc->iv_dir = DMA_FROM_DEVICE;
-
-        /* Make sure IV is located in a DMAable area */
-        iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-        iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
-        if (dma_mapping_error(jrdev, iv_dma)) {
-                dev_err(jrdev, "unable to map IV\n");
-                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                           0, DMA_NONE, 0, 0);
-                kfree(edesc);
-                return ERR_PTR(-ENOMEM);
-        }
-
-        if (mapped_src_nents > 1)
-                sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
-                                   0);
-
-        dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
-        sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
-                           dst_sg_idx + 1, 0);
-
-        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                            sec4_sg_bytes, DMA_TO_DEVICE);
-        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-                dev_err(jrdev, "unable to map S/G table\n");
-                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                           iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
-                kfree(edesc);
-                return ERR_PTR(-ENOMEM);
-        }
-        edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR,
-                       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
-                       sec4_sg_bytes, 1);
-#endif
-
-        return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
-        struct ablkcipher_request *req = &creq->creq;
-        struct ablkcipher_edesc *edesc;
-        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-        struct device *jrdev = ctx->jrdev;
-        u32 *desc;
-        int ret = 0;
-
-        /* allocate extended descriptor */
-        edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
-        if (IS_ERR(edesc))
-                return PTR_ERR(edesc);
-
-        /* Create and submit job descriptor*/
-        init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
-                                edesc, req);
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR,
-                       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-                       desc_bytes(edesc->hw_desc), 1);
-#endif
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
-                ablkcipher_unmap(jrdev, edesc, req);
-                kfree(edesc);
-        }
-
-        return ret;
-}
-
-#define template_aead           template_u.aead
-#define template_ablkcipher     template_u.ablkcipher
-struct caam_alg_template {
-        char name[CRYPTO_MAX_ALG_NAME];
-        char driver_name[CRYPTO_MAX_ALG_NAME];
-        unsigned int blocksize;
-        u32 type;
-        union {
-                struct ablkcipher_alg ablkcipher;
-        } template_u;
-        u32 class1_alg_type;
-        u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
-        /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
         {
-                .name = "cbc(aes)",
-                .driver_name = "cbc-aes-caam",
-                .blocksize = AES_BLOCK_SIZE,
-                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-                .template_ablkcipher = {
-                        .setkey = ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .givencrypt = ablkcipher_givencrypt,
-                        .geniv = "<built-in>",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "cbc(aes)",
+                                .cra_driver_name = "cbc-aes-caam",
+                                .cra_blocksize = AES_BLOCK_SIZE,
+                        },
+                        .setkey = skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = AES_MIN_KEY_SIZE,
                         .max_keysize = AES_MAX_KEY_SIZE,
                         .ivsize = AES_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
         },
         {
-                .name = "cbc(des3_ede)",
-                .driver_name = "cbc-3des-caam",
-                .blocksize = DES3_EDE_BLOCK_SIZE,
-                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-                .template_ablkcipher = {
-                        .setkey = ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .givencrypt = ablkcipher_givencrypt,
-                        .geniv = "<built-in>",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "cbc(des3_ede)",
+                                .cra_driver_name = "cbc-3des-caam",
+                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                        },
+                        .setkey = skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = DES3_EDE_KEY_SIZE,
                         .max_keysize = DES3_EDE_KEY_SIZE,
                         .ivsize = DES3_EDE_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
         },
         {
-                .name = "cbc(des)",
-                .driver_name = "cbc-des-caam",
-                .blocksize = DES_BLOCK_SIZE,
-                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-                .template_ablkcipher = {
-                        .setkey = ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .givencrypt = ablkcipher_givencrypt,
-                        .geniv = "<built-in>",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "cbc(des)",
+                                .cra_driver_name = "cbc-des-caam",
+                                .cra_blocksize = DES_BLOCK_SIZE,
+                        },
+                        .setkey = skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = DES_KEY_SIZE,
                         .max_keysize = DES_KEY_SIZE,
                         .ivsize = DES_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
         },
         {
-                .name = "ctr(aes)",
-                .driver_name = "ctr-aes-caam",
-                .blocksize = 1,
-                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-                .template_ablkcipher = {
-                        .setkey = ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .geniv = "chainiv",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "ctr(aes)",
+                                .cra_driver_name = "ctr-aes-caam",
+                                .cra_blocksize = 1,
+                        },
+                        .setkey = skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = AES_MIN_KEY_SIZE,
                         .max_keysize = AES_MAX_KEY_SIZE,
                         .ivsize = AES_BLOCK_SIZE,
+                        .chunksize = AES_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+                .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+                                        OP_ALG_AAI_CTR_MOD128,
         },
         {
-                .name = "rfc3686(ctr(aes))",
-                .driver_name = "rfc3686-ctr-aes-caam",
-                .blocksize = 1,
-                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-                .template_ablkcipher = {
-                        .setkey = ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .givencrypt = ablkcipher_givencrypt,
-                        .geniv = "<built-in>",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "rfc3686(ctr(aes))",
+                                .cra_driver_name = "rfc3686-ctr-aes-caam",
+                                .cra_blocksize = 1,
+                        },
+                        .setkey = skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = AES_MIN_KEY_SIZE +
                                        CTR_RFC3686_NONCE_SIZE,
                         .max_keysize = AES_MAX_KEY_SIZE +
                                        CTR_RFC3686_NONCE_SIZE,
                         .ivsize = CTR_RFC3686_IV_SIZE,
+                        .chunksize = AES_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+                .caam = {
+                        .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                           OP_ALG_AAI_CTR_MOD128,
+                        .rfc3686 = true,
+                },
         },
         {
-                .name = "xts(aes)",
-                .driver_name = "xts-aes-caam",
-                .blocksize = AES_BLOCK_SIZE,
-                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-                .template_ablkcipher = {
-                        .setkey = xts_ablkcipher_setkey,
-                        .encrypt = ablkcipher_encrypt,
-                        .decrypt = ablkcipher_decrypt,
-                        .geniv = "eseqiv",
+                .skcipher = {
+                        .base = {
+                                .cra_name = "xts(aes)",
+                                .cra_driver_name = "xts-aes-caam",
+                                .cra_blocksize = AES_BLOCK_SIZE,
+                        },
+                        .setkey = xts_skcipher_setkey,
+                        .encrypt = skcipher_encrypt,
+                        .decrypt = skcipher_decrypt,
                         .min_keysize = 2 * AES_MIN_KEY_SIZE,
                         .max_keysize = 2 * AES_MAX_KEY_SIZE,
                         .ivsize = AES_BLOCK_SIZE,
                 },
-                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
         },
 };
 
 static struct caam_aead_alg driver_aeads[] = {
         {
                 .aead = {
                         .base = {
                                 .cra_name = "rfc4106(gcm(aes))",
--- 1268 unchanged lines hidden ---
                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                            OP_ALG_AAI_HMAC_PRECOMP,
                         .rfc3686 = true,
                         .geniv = true,
                 },
         },
 };
 
-struct caam_crypto_alg {
-        struct crypto_alg crypto_alg;
-        struct list_head entry;
-        struct caam_alg_entry caam;
-};
-
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
                             bool uses_dkp)
 {
         dma_addr_t dma_addr;
         struct caam_drv_private *priv;
 
         ctx->jrdev = caam_jr_alloc();
         if (IS_ERR(ctx->jrdev)) {
--- 15 unchanged lines hidden ---
                 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
                 caam_jr_free(ctx->jrdev);
                 return -ENOMEM;
         }
 
         ctx->sh_desc_enc_dma = dma_addr;
         ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
                                                    sh_desc_dec);
-        ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
-                                                      sh_desc_givenc);
         ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
 
         /* copy descriptor header template value */
         ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
         ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 
         return 0;
 }
 
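A note on the offsetof() arithmetic above: caam_init_common() maps the leading members of struct caam_ctx as one DMA region, so each shared-descriptor/key bus address is derived from a single base handle (restating the code above for clarity):

        /*
         * dma_addr covers sh_desc_enc .. key as one mapping, hence:
         *
         *   ctx->sh_desc_enc_dma = dma_addr
         *   ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, sh_desc_dec)
         *   ctx->key_dma         = dma_addr + offsetof(struct caam_ctx, key)
         *
         * caam_exit_common() later unmaps the same region in one call.
         */
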
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
 {
-        struct crypto_alg *alg = tfm->__crt_alg;
-        struct caam_crypto_alg *caam_alg =
-                container_of(alg, struct caam_crypto_alg, crypto_alg);
-        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+        struct caam_skcipher_alg *caam_alg =
+                container_of(alg, typeof(*caam_alg), skcipher);
 
-        return caam_init_common(ctx, &caam_alg->caam, false);
+        return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+                                false);
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
 {
         struct aead_alg *alg = crypto_aead_alg(tfm);
         struct caam_aead_alg *caam_alg =
                 container_of(alg, struct caam_aead_alg, aead);
         struct caam_ctx *ctx = crypto_aead_ctx(tfm);
--- 5 unchanged lines hidden ---
 static void caam_exit_common(struct caam_ctx *ctx)
 {
         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
                                offsetof(struct caam_ctx, sh_desc_enc_dma),
                                ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
         caam_jr_free(ctx->jrdev);
 }
 
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-        caam_exit_common(crypto_tfm_ctx(tfm));
+        caam_exit_common(crypto_skcipher_ctx(tfm));
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
 {
         caam_exit_common(crypto_aead_ctx(tfm));
 }
 
 static void __exit caam_algapi_exit(void)
 {
3331 3332 struct caam_crypto_alg *t_alg, *n; | |
3333 int i; 3334 3335 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3336 struct caam_aead_alg *t_alg = driver_aeads + i; 3337 3338 if (t_alg->registered) 3339 crypto_unregister_aead(&t_alg->aead); 3340 } 3341 | 3088 int i; 3089 3090 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3091 struct caam_aead_alg *t_alg = driver_aeads + i; 3092 3093 if (t_alg->registered) 3094 crypto_unregister_aead(&t_alg->aead); 3095 } 3096 |
3342 if (!alg_list.next) 3343 return; | 3097 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3098 struct caam_skcipher_alg *t_alg = driver_algs + i; |
3344 | 3099 |
3345 list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 3346 crypto_unregister_alg(&t_alg->crypto_alg); 3347 list_del(&t_alg->entry); 3348 kfree(t_alg); | 3100 if (t_alg->registered) 3101 crypto_unregister_skcipher(&t_alg->skcipher); |
3349 } 3350} 3351 | 3102 } 3103} 3104 |
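Editor's note: teardown changes shape in this hunk. The ablkcipher code walked alg_list, unregistering and kfree()ing wrappers allocated at probe time, and needed the `if (!alg_list.next)` guard against a never-initialized list; the skcipher code simply iterates the static driver_algs[] table and unregisters only entries whose `registered` flag was set during init. A small sketch of that flag-gated pattern over a static table (register_one() and its failure rule are invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct alg_entry {
        const char *name;
        bool registered;        /* set only on successful registration */
};

static struct alg_entry algs[] = {
        { "alg-a", false },
        { "alg-b", false },
};

static int register_one(struct alg_entry *e)
{
        /* Pretend the second algorithm is unsupported on this device. */
        return strcmp(e->name, "alg-b") ? 0 : -1;
}

int main(void)
{
        size_t i, n = sizeof(algs) / sizeof(algs[0]);

        for (i = 0; i < n; i++)
                if (register_one(&algs[i]) == 0)
                        algs[i].registered = true;

        /* Teardown skips entries that never registered. */
        for (i = 0; i < n; i++)
                if (algs[i].registered)
                        printf("unregistering %s\n", algs[i].name);
        return 0;
}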
3352static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 3353 *template) | 3105static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) |
3354{ | 3106{ |
3355 struct caam_crypto_alg *t_alg; 3356 struct crypto_alg *alg; | 3107 struct skcipher_alg *alg = &t_alg->skcipher; |
3357 | 3108 |
3358 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 3359 if (!t_alg) { 3360 pr_err("failed to allocate t_alg\n"); 3361 return ERR_PTR(-ENOMEM); 3362 } | 3109 alg->base.cra_module = THIS_MODULE; 3110 alg->base.cra_priority = CAAM_CRA_PRIORITY; 3111 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 3112 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; |
3363 | 3113 |
3364 alg = &t_alg->crypto_alg; 3365 3366 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 3367 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 3368 template->driver_name); 3369 alg->cra_module = THIS_MODULE; 3370 alg->cra_init = caam_cra_init; 3371 alg->cra_exit = caam_cra_exit; 3372 alg->cra_priority = CAAM_CRA_PRIORITY; 3373 alg->cra_blocksize = template->blocksize; 3374 alg->cra_alignmask = 0; 3375 alg->cra_ctxsize = sizeof(struct caam_ctx); 3376 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 3377 template->type; 3378 switch (template->type) { 3379 case CRYPTO_ALG_TYPE_GIVCIPHER: 3380 alg->cra_type = &crypto_givcipher_type; 3381 alg->cra_ablkcipher = template->template_ablkcipher; 3382 break; 3383 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3384 alg->cra_type = &crypto_ablkcipher_type; 3385 alg->cra_ablkcipher = template->template_ablkcipher; 3386 break; 3387 } 3388 3389 t_alg->caam.class1_alg_type = template->class1_alg_type; 3390 t_alg->caam.class2_alg_type = template->class2_alg_type; 3391 3392 return t_alg; | 3114 alg->init = caam_cra_init; 3115 alg->exit = caam_cra_exit; |
3393} 3394 3395static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 3396{ 3397 struct aead_alg *alg = &t_alg->aead; 3398 3399 alg->base.cra_module = THIS_MODULE; 3400 alg->base.cra_priority = CAAM_CRA_PRIORITY; --- 35 unchanged lines hidden --- 3436 /* 3437 * If priv is NULL, it's probably because the caam driver wasn't 3438 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 3439 */ 3440 if (!priv) 3441 return -ENODEV; 3442 3443 | 3116} 3117 3118static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 3119{ 3120 struct aead_alg *alg = &t_alg->aead; 3121 3122 alg->base.cra_module = THIS_MODULE; 3123 alg->base.cra_priority = CAAM_CRA_PRIORITY; --- 35 unchanged lines hidden --- 3159 /* 3160 * If priv is NULL, it's probably because the caam driver wasn't 3161 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 3162 */ 3163 if (!priv) 3164 return -ENODEV; 3165 3166 |
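Editor's note: caam_skcipher_alg_init() replaces the old caam_alg_alloc(): instead of kzalloc()ing a caam_crypto_alg and copying names and properties out of a template at runtime, the driver now keeps complete skcipher_alg entries in the static driver_algs[] table and only patches the fields common to every entry (module, priority, ctxsize, flags, init/exit), exactly as caam_aead_alg_init() already did for AEADs. A sketch of that split under invented names, with per-entry data in the static initializer and shared data filled by one helper:

#include <stdio.h>

struct alg_tmpl {
        const char *name;       /* per-entry, set statically */
        unsigned int blocksize; /* per-entry, set statically */
        unsigned int priority;  /* common, patched at init time */
};

static struct alg_tmpl algs[] = {
        { .name = "cbc-demo", .blocksize = 16 },
        { .name = "ctr-demo", .blocksize = 1 },
};

static void alg_init_common(struct alg_tmpl *t)
{
        /* Only the fields every entry shares are filled here. */
        t->priority = 3000;
}

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(algs) / sizeof(algs[0]); i++) {
                alg_init_common(&algs[i]);
                printf("%s prio=%u bs=%u\n", algs[i].name,
                       algs[i].priority, algs[i].blocksize);
        }
        return 0;
}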
3444 INIT_LIST_HEAD(&alg_list); 3445 | |
3446 /* 3447 * Register crypto algorithms the device supports. 3448 * First, detect presence and attributes of DES, AES, and MD blocks. 3449 */ 3450 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 3451 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 3452 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; 3453 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; 3454 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 3455 3456 /* If MD is present, limit digest size based on LP256 */ 3457 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) 3458 md_limit = SHA256_DIGEST_SIZE; 3459 3460 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 3167 /* 3168 * Register crypto algorithms the device supports. 3169 * First, detect presence and attributes of DES, AES, and MD blocks. 3170 */ 3171 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 3172 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 3173 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; 3174 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; 3175 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 3176 3177 /* If MD is present, limit digest size based on LP256 */ 3178 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) 3179 md_limit = SHA256_DIGEST_SIZE; 3180 3181 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
3461 struct caam_crypto_alg *t_alg; 3462 struct caam_alg_template *alg = driver_algs + i; 3463 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; | 3182 struct caam_skcipher_alg *t_alg = driver_algs + i; 3183 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; |
3464 3465 /* Skip DES algorithms if not supported by device */ 3466 if (!des_inst && 3467 ((alg_sel == OP_ALG_ALGSEL_3DES) || 3468 (alg_sel == OP_ALG_ALGSEL_DES))) 3469 continue; 3470 3471 /* Skip AES algorithms if not supported by device */ 3472 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 3473 continue; 3474 3475 /* 3476 * Check support for AES modes not available 3477 * on LP devices. 3478 */ 3479 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) | 3184 3185 /* Skip DES algorithms if not supported by device */ 3186 if (!des_inst && 3187 ((alg_sel == OP_ALG_ALGSEL_3DES) || 3188 (alg_sel == OP_ALG_ALGSEL_DES))) 3189 continue; 3190 3191 /* Skip AES algorithms if not supported by device */ 3192 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 3193 continue; 3194 3195 /* 3196 * Check support for AES modes not available 3197 * on LP devices. 3198 */ 3199 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) |
3480 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == | 3200 if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == |
3481 OP_ALG_AAI_XTS) 3482 continue; 3483 | 3201 OP_ALG_AAI_XTS) 3202 continue; 3203 |
3484 t_alg = caam_alg_alloc(alg); 3485 if (IS_ERR(t_alg)) { 3486 err = PTR_ERR(t_alg); 3487 pr_warn("%s alg allocation failed\n", alg->driver_name); 3488 continue; 3489 } | 3204 caam_skcipher_alg_init(t_alg); |
3490 | 3205 |
3491 err = crypto_register_alg(&t_alg->crypto_alg); | 3206 err = crypto_register_skcipher(&t_alg->skcipher); |
3492 if (err) { 3493 pr_warn("%s alg registration failed\n", | 3207 if (err) { 3208 pr_warn("%s alg registration failed\n", |
3494 t_alg->crypto_alg.cra_driver_name); 3495 kfree(t_alg); | 3209 t_alg->skcipher.base.cra_driver_name); |
3496 continue; 3497 } 3498 | 3210 continue; 3211 } 3212 |
3499 list_add_tail(&t_alg->entry, &alg_list); | 3213 t_alg->registered = true; |
3500 registered = true; 3501 } 3502 3503 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3504 struct caam_aead_alg *t_alg = driver_aeads + i; 3505 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 3506 OP_ALG_ALGSEL_MASK; 3507 u32 c2_alg_sel = t_alg->caam.class2_alg_type & --- 54 unchanged lines hidden --- | 3214 registered = true; 3215 } 3216 3217 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3218 struct caam_aead_alg *t_alg = driver_aeads + i; 3219 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 3220 OP_ALG_ALGSEL_MASK; 3221 u32 c2_alg_sel = t_alg->caam.class2_alg_type & --- 54 unchanged lines hidden --- |
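Editor's note: both before and after the conversion, caam_algapi_init() gates registration on hardware capability registers: each engine count is isolated from cha_inst with a mask and shift, and unsupported DES/AES/XTS entries are skipped rather than failing the whole probe. A compilable sketch of that mask-and-shift gating; the register value and field layout here are invented, not the real CHA encoding:

#include <stdint.h>
#include <stdio.h>

/* Invented layout: bits 7..4 count AES engines, bits 3..0 count DES. */
#define DEMO_AES_MASK   0xf0u
#define DEMO_AES_SHIFT  4
#define DEMO_DES_MASK   0x0fu
#define DEMO_DES_SHIFT  0

int main(void)
{
        uint32_t cha_inst = 0x30;       /* pretend: 3 AES engines, no DES */
        unsigned int aes_inst = (cha_inst & DEMO_AES_MASK) >> DEMO_AES_SHIFT;
        unsigned int des_inst = (cha_inst & DEMO_DES_MASK) >> DEMO_DES_SHIFT;

        /* Same policy as the init path: skip what the device lacks. */
        if (!des_inst)
                printf("skipping DES/3DES algorithms\n");
        if (aes_inst)
                printf("registering AES algorithms (%u engines)\n", aes_inst);
        return 0;
}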