/*
 * Cryptographic API.
 *
 * Glue code for the SHA512 Secure Hash Algorithm assembler
 * implementation using supplemental SSE3 / AVX / AVX2 instructions.
 *
 * This file is based on sha512_generic.c
 *
 * Copyright (C) 2013 Intel Corporation
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

#include <linux/string.h>

asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
				       u64 rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
				     u64 rounds);
#endif
#ifdef CONFIG_AS_AVX2
asmlinkage void sha512_transform_rorx(u64 *digest, const char *data,
				      u64 rounds);
#endif

static void (*sha512_transform_asm)(u64 *, const char *, u64);

static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);

	if (!irq_fpu_usable() ||
	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
		return crypto_sha512_update(desc, data, len);

	/* make sure casting to sha512_block_fn() is safe */
	BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);

	kernel_fpu_begin();
	sha512_base_do_update(desc, data, len,
			      (sha512_block_fn *)sha512_transform_asm);
	kernel_fpu_end();

	return 0;
}

static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	if (!irq_fpu_usable())
		return crypto_sha512_finup(desc, data, len, out);

	kernel_fpu_begin();
	if (len)
		sha512_base_do_update(desc, data, len,
				      (sha512_block_fn *)sha512_transform_asm);
	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_transform_asm);
	kernel_fpu_end();

	return sha512_base_finish(desc, out);
}

/* Add padding and return the message digest. */
static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
{
	return sha512_ssse3_finup(desc, NULL, 0, out);
}

static struct shash_alg algs[] = { {
	.digestsize	=	SHA512_DIGEST_SIZE,
	.init		=	sha512_base_init,
	.update		=	sha512_ssse3_update,
	.final		=	sha512_ssse3_final,
	.finup		=	sha512_ssse3_finup,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha512",
		.cra_driver_name =	"sha512-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA512_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
}, {
	.digestsize	=	SHA384_DIGEST_SIZE,
	.init		=	sha384_base_init,
	.update		=	sha512_ssse3_update,
	.final		=	sha512_ssse3_final,
	.finup		=	sha512_ssse3_finup,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha384",
		.cra_driver_name =	"sha384-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA384_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
} };

#ifdef CONFIG_AS_AVX
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");

		return false;
	}

	return true;
}
#endif

static int __init sha512_ssse3_mod_init(void)
{
	/* test for SSSE3 first */
	if (cpu_has_ssse3)
		sha512_transform_asm = sha512_transform_ssse3;

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
		if (boot_cpu_has(X86_FEATURE_AVX2))
			sha512_transform_asm = sha512_transform_rorx;
		else
#endif
			sha512_transform_asm = sha512_transform_avx;
	}
#endif

	if (sha512_transform_asm) {
#ifdef CONFIG_AS_AVX
		if (sha512_transform_asm == sha512_transform_avx)
			pr_info("Using AVX optimized SHA-512 implementation\n");
#ifdef CONFIG_AS_AVX2
		else if (sha512_transform_asm == sha512_transform_rorx)
			pr_info("Using AVX2 optimized SHA-512 implementation\n");
#endif
		else
#endif
			pr_info("Using SSSE3 optimized SHA-512 implementation\n");
		return crypto_register_shashes(algs, ARRAY_SIZE(algs));
	}
	pr_info("Neither AVX nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha512_ssse3_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha512_ssse3_mod_init);
module_exit(sha512_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha384");
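
/*
 * Usage sketch (informational comment only, not part of the driver): once
 * this module is loaded, other kernel code reaches these transforms through
 * the generic shash API by requesting "sha512" (the core then picks the
 * highest-priority provider, e.g. "sha512-ssse3") rather than by calling
 * anything in this file directly.  The helper below is a hypothetical,
 * minimal example of that calling convention and assumes <crypto/hash.h>;
 * it is not code this module provides.
 *
 *	static int example_sha512_digest(const u8 *data, unsigned int len,
 *					 u8 out[SHA512_DIGEST_SIZE])
 *	{
 *		struct crypto_shash *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_shash("sha512", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		{
 *			SHASH_DESC_ON_STACK(desc, tfm);
 *
 *			desc->tfm = tfm;
 *			desc->flags = 0;
 *			err = crypto_shash_digest(desc, data, len, out);
 *		}
 *
 *		crypto_free_shash(tfm);
 *		return err;
 *	}
 */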