/*
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * ARM NEON instructions.
 *
 * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
 *  Copyright (c) Alan Smithee.
 *  Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 *  Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 *  Copyright (c) Mathias Krause <minipli@googlemail.com>
 *  Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/crypto/sha1.h>


asmlinkage void sha1_transform_neon(void *state_h, const char *data,
				    unsigned int rounds);


static int sha1_neon_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_transform_neon(sctx->state, sctx->buffer, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_transform_neon(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	memcpy(sctx->buffer, data + done, len - done);

	return 0;
}

static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);

		return 0;
	}

	if (!may_use_simd()) {
		res = sha1_update_arm(desc, data, len);
	} else {
		kernel_neon_begin();
		res = __sha1_neon_update(desc, data, len, partial);
		kernel_neon_end();
	}

	return res;
}


/* Add padding and return the message digest. */
static int sha1_neon_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	if (!may_use_simd()) {
		sha1_update_arm(desc, padding, padlen);
		sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_neon_begin();
		/* We need to fill a whole block for __sha1_neon_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_neon_update(desc, padding, padlen, index);
		}
		__sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_neon_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

static int sha1_neon_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_neon_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_neon_init,
	.update		= sha1_neon_update,
	.final		= sha1_neon_final,
	.export		= sha1_neon_export,
	.import		= sha1_neon_import,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-neon",
		.cra_priority		= 250,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static int __init sha1_neon_mod_init(void)
{
	if (!cpu_has_neon())
		return -ENODEV;

	return crypto_register_shash(&alg);
}

static void __exit sha1_neon_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_neon_mod_init);
module_exit(sha1_neon_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
MODULE_ALIAS_CRYPTO("sha1");
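
A minimal usage sketch, separate from the module above: once "sha1-neon" is registered, other kernel code reaches it through the generic shash API by asking for "sha1" (the crypto core picks the highest-priority provider, so this resolves to the NEON driver on capable CPUs). The function name example_sha1_digest is hypothetical and the error handling is kept to the essentials; this is illustrative, not part of the driver.

/* Illustrative sketch only, not part of this module. */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/slab.h>
#include <linux/err.h>

static int example_sha1_digest(const u8 *data, unsigned int len,
			       u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* Ask for "sha1"; priority selection picks sha1-neon when available. */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The descriptor carries struct sha1_state (descsize) after it. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init + update + final in one call */
	err = crypto_shash_digest(desc, data, len, out);

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}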