/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <arm_neon.h>

#include "sha512.h"
#include "sha512c_impl.h"

void __hidden
SHA512_Transform_arm64_impl(uint64_t * state,
    const unsigned char block[SHA512_BLOCK_LENGTH], const uint64_t K[80])
{
	uint64x2_t W[8];
	uint64x2_t S[4];
	uint64x2_t S_start[4];
	uint64x2_t K_tmp, S_tmp;
	int i;

/*
 * Load two 64-bit message words into W[x] and byte-swap each word from the
 * big-endian order SHA-512 uses to the CPU's native order.
 */
#define	A64_LOAD_W(x)							\
	W[x] = vld1q_u64((const uint64_t *)(&block[(x) * 16]));	\
	W[x] = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(W[x])))

	/* 1. Prepare the first part of the message schedule W. */
	A64_LOAD_W(0);
	A64_LOAD_W(1);
	A64_LOAD_W(2);
	A64_LOAD_W(3);
	A64_LOAD_W(4);
	A64_LOAD_W(5);
	A64_LOAD_W(6);
	A64_LOAD_W(7);

	/* 2. Initialize working variables. */
	S[0] = vld1q_u64(&state[0]);
	S[1] = vld1q_u64(&state[2]);
	S[2] = vld1q_u64(&state[4]);
	S[3] = vld1q_u64(&state[6]);

	S_start[0] = S[0];
	S_start[1] = S[1];
	S_start[2] = S[2];
	S_start[3] = S[3];

	/* 3. Mix. */
	for (i = 0; i < 80; i += 16) {
		/*
		 * The eight working variables are held in 4 vectors;
		 * for round step i they are:
		 * ab = S[( 8 - i) % 4]
		 * cd = S[( 9 - i) % 4]
		 * ef = S[(10 - i) % 4]
		 * gh = S[(11 - i) % 4]
		 *
		 * The following macro:
		 *  - Loads the round constants
		 *  - Adds them to the schedule words
		 *  - Rotates the total to switch the order of the two halves
		 *    so they are in the correct order for gh
		 *  - Fixes the alignment
		 *  - Extracts fg from ef and gh
		 *  - Extracts de from cd and ef
		 *  - Passes these into the first part of the sha512
		 *    calculation to compute the Sigma 1 and Ch steps
		 *  - Calculates the Sigma 0 and Maj steps and stores to gh
		 *  - Adds the first part to the cd vector
		 */
#define	A64_RNDr(S, W, i, ii)						\
	K_tmp = vld1q_u64(K + (i * 2) + ii);				\
	K_tmp = vaddq_u64(W[i], K_tmp);					\
	K_tmp = vextq_u64(K_tmp, K_tmp, 1);				\
	K_tmp = vaddq_u64(K_tmp, S[(11 - i) % 4]);			\
	S_tmp = vsha512hq_u64(K_tmp,					\
	    vextq_u64(S[(10 - i) % 4], S[(11 - i) % 4], 1),		\
	    vextq_u64(S[(9 - i) % 4], S[(10 - i) % 4], 1));		\
	S[(11 - i) % 4] = vsha512h2q_u64(S_tmp, S[(9 - i) % 4],		\
	    S[(8 - i) % 4]);						\
	S[(9 - i) % 4] = vaddq_u64(S[(9 - i) % 4], S_tmp)

		A64_RNDr(S, W, 0, i);
		A64_RNDr(S, W, 1, i);
		A64_RNDr(S, W, 2, i);
		A64_RNDr(S, W, 3, i);
		A64_RNDr(S, W, 4, i);
		A64_RNDr(S, W, 5, i);
		A64_RNDr(S, W, 6, i);
		A64_RNDr(S, W, 7, i);

		/* The final 16 rounds need no new message schedule words. */
		if (i == 64)
			break;

		/*
		 * Perform the message schedule computation:
		 * - vsha512su0q_u64 performs the sigma 0 half and adds it to
		 *   the old value
		 * - vextq_u64 fixes the alignment of the vectors
		 * - vsha512su1q_u64 performs the sigma 1 half and adds it
		 *   together with the results of the two steps above
		 */
#define	A64_MSCH(x)							\
	W[x] = vsha512su1q_u64(						\
	    vsha512su0q_u64(W[x], W[(x + 1) % 8]),			\
	    W[(x + 7) % 8],						\
	    vextq_u64(W[(x + 4) % 8], W[(x + 5) % 8], 1))

		A64_MSCH(0);
		A64_MSCH(1);
		A64_MSCH(2);
		A64_MSCH(3);
		A64_MSCH(4);
		A64_MSCH(5);
		A64_MSCH(6);
		A64_MSCH(7);
	}

	/* 4. Mix local working variables into global state. */
	S[0] = vaddq_u64(S[0], S_start[0]);
	S[1] = vaddq_u64(S[1], S_start[1]);
	S[2] = vaddq_u64(S[2], S_start[2]);
	S[3] = vaddq_u64(S[3], S_start[3]);

	vst1q_u64(&state[0], S[0]);
	vst1q_u64(&state[2], S[1]);
	vst1q_u64(&state[4], S[2]);
	vst1q_u64(&state[6], S[3]);
}