/* Glue code for SHA1 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>

#include <asm/pstate.h>
#include <asm/elf.h>

/* Assembly core using the sparc64 sha1 instruction: folds 'rounds'
 * consecutive SHA1_BLOCK_SIZE-byte blocks of 'data' into the
 * five-word 'digest' state.
 */
asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
				       unsigned int rounds);

/* Reset the per-request hash state to the SHA-1 initial values.
 * The compound literal also zeroes ->count and the partial-block
 * buffer. Always returns 0.
 */
static int sha1_sparc64_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}
Miller static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data, 39*4ff28d4cSDavid S. Miller unsigned int len, unsigned int partial) 40*4ff28d4cSDavid S. Miller { 41*4ff28d4cSDavid S. Miller unsigned int done = 0; 42*4ff28d4cSDavid S. Miller 43*4ff28d4cSDavid S. Miller sctx->count += len; 44*4ff28d4cSDavid S. Miller if (partial) { 45*4ff28d4cSDavid S. Miller done = SHA1_BLOCK_SIZE - partial; 46*4ff28d4cSDavid S. Miller memcpy(sctx->buffer + partial, data, done); 47*4ff28d4cSDavid S. Miller sha1_sparc64_transform(sctx->state, sctx->buffer, 1); 48*4ff28d4cSDavid S. Miller } 49*4ff28d4cSDavid S. Miller if (len - done >= SHA1_BLOCK_SIZE) { 50*4ff28d4cSDavid S. Miller const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; 51*4ff28d4cSDavid S. Miller 52*4ff28d4cSDavid S. Miller sha1_sparc64_transform(sctx->state, data + done, rounds); 53*4ff28d4cSDavid S. Miller done += rounds * SHA1_BLOCK_SIZE; 54*4ff28d4cSDavid S. Miller } 55*4ff28d4cSDavid S. Miller 56*4ff28d4cSDavid S. Miller memcpy(sctx->buffer, data + done, len - done); 57*4ff28d4cSDavid S. Miller } 58*4ff28d4cSDavid S. Miller 59*4ff28d4cSDavid S. Miller static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data, 60*4ff28d4cSDavid S. Miller unsigned int len) 61*4ff28d4cSDavid S. Miller { 62*4ff28d4cSDavid S. Miller struct sha1_state *sctx = shash_desc_ctx(desc); 63*4ff28d4cSDavid S. Miller unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; 64*4ff28d4cSDavid S. Miller 65*4ff28d4cSDavid S. Miller /* Handle the fast case right here */ 66*4ff28d4cSDavid S. Miller if (partial + len < SHA1_BLOCK_SIZE) { 67*4ff28d4cSDavid S. Miller sctx->count += len; 68*4ff28d4cSDavid S. Miller memcpy(sctx->buffer + partial, data, len); 69*4ff28d4cSDavid S. Miller } else 70*4ff28d4cSDavid S. Miller __sha1_sparc64_update(sctx, data, len, partial); 71*4ff28d4cSDavid S. Miller 72*4ff28d4cSDavid S. Miller return 0; 73*4ff28d4cSDavid S. Miller } 74*4ff28d4cSDavid S. Miller 75*4ff28d4cSDavid S. 
/* Add padding and return the message digest. */
static int sha1_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	/* First padding byte is 0x80; the rest are implicitly zero. */
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	/* Capture the message length in bits now, before the padding
	 * below grows sctx->count.
	 */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);

	/* We need to fill a whole block for __sha1_sparc64_update() */
	if (padlen <= 56) {
		/* Padding fits in the current block: buffer it directly. */
		sctx->count += padlen;
		memcpy(sctx->buffer + index, padding, padlen);
	} else {
		/* Padding spills into a second block: let the helper hash
		 * the completed block and buffer the remainder.
		 */
		__sha1_sparc64_update(sctx, padding, padlen, index);
	}
	/* Buffer now holds exactly 56 bytes; appending the 8-byte
	 * big-endian bit count completes and hashes the final block.
	 */
	__sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest (SHA-1 output is big-endian words) */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

/* Copy out the raw hash state so a caller can snapshot and resume. */
static int sha1_sparc64_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

/* Restore hash state previously saved by sha1_sparc64_export(). */
static int sha1_sparc64_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_sparc64_init,
	.update		=	sha1_sparc64_update,
	.final		=	sha1_sparc64_final,
	.export		=	sha1_sparc64_export,
	.import		=	sha1_sparc64_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-sparc64",
		/* NOTE(review): 150 presumably outranks the generic C
		 * sha1 driver so this one is preferred — confirm against
		 * crypto/sha1_generic.c's cra_priority.
		 */
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

/* True when the CPU advertises the crypto hwcap and %asr26
 * (presumably the configuration feature register, per CFR_* in
 * asm/pstate.h) has the SHA-1 bit set.
 */
static bool __init sparc64_has_sha1_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA1))
		return false;

	return true;
}

/* Register the shash only when the hardware opcode is present;
 * otherwise return -ENODEV so a software SHA-1 can be used instead.
 */
static int __init sha1_sparc64_mod_init(void)
{
	if (sparc64_has_sha1_opcode()) {
		pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n");
		return crypto_register_shash(&alg);
	}
	pr_info("sparc64 sha1 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha1_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_sparc64_mod_init);
module_exit(sha1_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");

MODULE_ALIAS("sha1");