/*
 * Implementation of the Skein block functions.
 * Source code author: Doug Whiting, 2008.
 * This algorithm and source code is released to the public domain.
 * Compile-time switches:
 *  SKEIN_USE_ASM -- set bits (256/512/1024) to select which
 *                   versions use ASM code for block processing
 *                   [default: use C for all block sizes]
 */
/* Copyright 2013 Doug Whiting. This code is released to the public domain. */

#include <sys/skein.h>
#include "skein_impl.h"

#ifndef SKEIN_USE_ASM
#define	SKEIN_USE_ASM	(0)	/* default is all C code (no ASM) */
#endif

#ifndef SKEIN_LOOP
#define	SKEIN_LOOP 001	/* default: unroll 256 and 512, but not 1024 */
#endif

/* some useful definitions for code here */
#define	BLK_BITS	(WCNT*64)
#define	KW_TWK_BASE	(0)
#define	KW_KEY_BASE	(3)
/*
 * ks/ts overlay each function's local kw[] array: ts -> the three tweak
 * words at kw[0..2], ks -> the key schedule words starting at kw[3].
 */
#define	ks	(kw + KW_KEY_BASE)
#define	ts	(kw + KW_TWK_BASE)

/* no debugging in Illumos version */
#define	DebugSaveTweak(ctx)

/* Skein_256 */
#if !(SKEIN_USE_ASM & 256)
/*
 * Process blkCnt contiguous input blocks (SKEIN_256_BLOCK_BYTES each)
 * starting at blkPtr, folding each block into the chaining variables
 * ctx->X via the block-cipher rounds plus the final "feedforward" xor.
 * byteCntAdd is added to the tweak's processed-byte count once per block.
 * Must never be called with blkCnt == 0.
 */
void
Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr,
    size_t blkCnt, size_t byteCntAdd)
{ /* do it in C */
	enum {
		WCNT = SKEIN_256_STATE_WORDS
	};
#undef  RCNT
#define	RCNT  (SKEIN_256_ROUNDS_TOTAL / 8)

#ifdef	SKEIN_LOOP	/* configure how much to unroll the loop */
#define	SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
#else
#define	SKEIN_UNROLL_256 (0)
#endif

#if	SKEIN_UNROLL_256
#if	(RCNT % SKEIN_UNROLL_256)
#error "Invalid SKEIN_UNROLL_256"	/* sanity check on unroll count */
#endif
	size_t r;
	/* key schedule words : chaining vars + tweak + "rotation" */
	uint64_t kw[WCNT + 4 + RCNT * 2];
#else
	uint64_t kw[WCNT + 4];	/* key schedule words : chaining vars + tweak */
#endif
	/* local copy of context vars, for speed */
	uint64_t X0, X1, X2, X3;
	uint64_t w[WCNT];	/* local copy of input block */
#ifdef	SKEIN_DEBUG
	/* use for debugging (help compiler put Xn in registers) */
	const uint64_t *Xptr[4];
	Xptr[0] = &X0;
	Xptr[1] = &X1;
	Xptr[2] = &X2;
	Xptr[3] = &X3;
#endif
	Skein_assert(blkCnt != 0);	/* never call with blkCnt == 0! */
	ts[0] = ctx->h.T[0];
	ts[1] = ctx->h.T[1];
	do {
		/*
		 * this implementation only supports 2**64 input bytes
		 * (no carry out here)
		 */
		ts[0] += byteCntAdd;	/* update processed length */

		/* precompute the key schedule for this block */
		ks[0] = ctx->X[0];
		ks[1] = ctx->X[1];
		ks[2] = ctx->X[2];
		ks[3] = ctx->X[3];
		ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY;

		ts[2] = ts[0] ^ ts[1];

		/* get input block in little-endian format */
		Skein_Get64_LSB_First(w, blkPtr, WCNT);
		DebugSaveTweak(ctx);
		Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts);

		X0 = w[0] + ks[0];	/* do the first full key injection */
		X1 = w[1] + ks[1] + ts[0];
		X2 = w[2] + ks[2] + ts[1];
		X3 = w[3] + ks[3];

		Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
		    Xptr);	/* show starting state values */

		blkPtr += SKEIN_256_BLOCK_BYTES;

		/* run the rounds */

/* one pair of MIX operations: add, rotate-left, xor */
#define	Round256(p0, p1, p2, p3, ROT, rNum)				\
	X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
	X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2; \

#if	SKEIN_UNROLL_256 == 0
#define	R256(p0, p1, p2, p3, ROT, rNum)	/* fully unrolled */		\
	Round256(p0, p1, p2, p3, ROT, rNum)				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr);

#define	I256(R)								\
	X0 += ks[((R) + 1) % 5];	/* inject the key schedule value */ \
	X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3];			\
	X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3];			\
	X3 += ks[((R) + 4) % 5] + (R) + 1;				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
#else				/* looping version */
#define	R256(p0, p1, p2, p3, ROT, rNum)					\
	Round256(p0, p1, p2, p3, ROT, rNum)				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr);

#define	I256(R)								\
	X0 += ks[r + (R) + 0];	/* inject the key schedule value */	\
	X1 += ks[r + (R) + 1] + ts[r + (R) + 0];			\
	X2 += ks[r + (R) + 2] + ts[r + (R) + 1];			\
	X3 += ks[r + (R) + 3] + r + (R);				\
	ks[r + (R) + 4] = ks[r + (R) - 1];	/* rotate key schedule */ \
	ts[r + (R) + 2] = ts[r + (R) - 1];				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);

		/* loop thru it */
		for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256)
#endif
		{
#define	R256_8_rounds(R)						\
		R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1);			\
		R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2);			\
		R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3);			\
		R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4);			\
		I256(2 * (R));						\
		R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5);			\
		R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6);			\
		R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7);			\
		R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8);			\
		I256(2 * (R) + 1);

			R256_8_rounds(0);

/*
 * True when round group NN must be emitted: either we are fully
 * unrolled and NN is within the total round count, or the partial
 * unroll factor exceeds NN.
 */
#define	R256_Unroll_R(NN)						\
	((SKEIN_UNROLL_256 == 0 && SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
	(SKEIN_UNROLL_256 > (NN)))

#if	R256_Unroll_R(1)
			R256_8_rounds(1);
#endif
#if	R256_Unroll_R(2)
			R256_8_rounds(2);
#endif
#if	R256_Unroll_R(3)
			R256_8_rounds(3);
#endif
#if	R256_Unroll_R(4)
			R256_8_rounds(4);
#endif
#if	R256_Unroll_R(5)
			R256_8_rounds(5);
#endif
#if	R256_Unroll_R(6)
			R256_8_rounds(6);
#endif
#if	R256_Unroll_R(7)
			R256_8_rounds(7);
#endif
#if	R256_Unroll_R(8)
			R256_8_rounds(8);
#endif
#if	R256_Unroll_R(9)
			R256_8_rounds(9);
#endif
#if	R256_Unroll_R(10)
			R256_8_rounds(10);
#endif
#if	R256_Unroll_R(11)
			R256_8_rounds(11);
#endif
#if	R256_Unroll_R(12)
			R256_8_rounds(12);
#endif
#if	R256_Unroll_R(13)
			R256_8_rounds(13);
#endif
#if	R256_Unroll_R(14)
			R256_8_rounds(14);
#endif
#if	(SKEIN_UNROLL_256 > 14)
#error "need more unrolling in Skein_256_Process_Block"
#endif
		}
		/*
		 * do the final "feedforward" xor, update context chaining vars
		 */
		ctx->X[0] = X0 ^ w[0];
		ctx->X[1] = X1 ^ w[1];
		ctx->X[2] = X2 ^ w[2];
		ctx->X[3] = X3 ^ w[3];

		Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X);

		/* FIRST flag applies only to the first block processed */
		ts[1] &= ~SKEIN_T1_FLAG_FIRST;
	}
	while (--blkCnt);
	ctx->h.T[0] = ts[0];	/* store updated tweak back into context */
	ctx->h.T[1] = ts[1];
}

#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t
Skein_256_Process_Block_CodeSize(void)
{
	/*
	 * Rough code-size estimate: distance between the two function
	 * entry points.  NOTE(review): presumably assumes the linker lays
	 * these functions out contiguously -- confirm before relying on it.
	 */
	return ((uint8_t *)Skein_256_Process_Block_CodeSize) -
	    ((uint8_t *)Skein_256_Process_Block);
}

uint_t
Skein_256_Unroll_Cnt(void)
{
	/* report the compile-time unroll factor chosen via SKEIN_LOOP */
	return (SKEIN_UNROLL_256);
}
#endif
#endif

/* Skein_512 */
#if !(SKEIN_USE_ASM & 512)
/*
 * Process blkCnt contiguous input blocks (SKEIN_512_BLOCK_BYTES each)
 * starting at blkPtr, folding each block into the chaining variables
 * ctx->X via the block-cipher rounds plus the final "feedforward" xor.
 * byteCntAdd is added to the tweak's processed-byte count once per block.
 * Must never be called with blkCnt == 0.
 */
void
Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr,
    size_t blkCnt, size_t byteCntAdd)
{ /* do it in C */
	enum {
		WCNT = SKEIN_512_STATE_WORDS
	};
#undef  RCNT
#define	RCNT  (SKEIN_512_ROUNDS_TOTAL / 8)

#ifdef	SKEIN_LOOP	/* configure how much to unroll the loop */
#define	SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
#else
#define	SKEIN_UNROLL_512 (0)
#endif

#if	SKEIN_UNROLL_512
#if	(RCNT % SKEIN_UNROLL_512)
#error "Invalid SKEIN_UNROLL_512"	/* sanity check on unroll count */
#endif
	size_t r;
	/* key schedule words : chaining vars + tweak + "rotation" */
	uint64_t kw[WCNT + 4 + RCNT * 2];
#else
	uint64_t kw[WCNT + 4];	/* key schedule words : chaining vars + tweak */
#endif
	/* local copy of vars, for speed */
	uint64_t X0, X1, X2, X3, X4, X5, X6, X7;
	uint64_t w[WCNT];	/* local copy of input block */
#ifdef	SKEIN_DEBUG
	/* use for debugging (help compiler put Xn in registers) */
	const uint64_t *Xptr[8];
	Xptr[0] = &X0;
	Xptr[1] = &X1;
	Xptr[2] = &X2;
	Xptr[3] = &X3;
	Xptr[4] = &X4;
	Xptr[5] = &X5;
	Xptr[6] = &X6;
	Xptr[7] = &X7;
#endif

	Skein_assert(blkCnt != 0);	/* never call with blkCnt == 0! */
	ts[0] = ctx->h.T[0];
	ts[1] = ctx->h.T[1];
	do {
		/*
		 * this implementation only supports 2**64 input bytes
		 * (no carry out here)
		 */
		ts[0] += byteCntAdd;	/* update processed length */

		/* precompute the key schedule for this block */
		ks[0] = ctx->X[0];
		ks[1] = ctx->X[1];
		ks[2] = ctx->X[2];
		ks[3] = ctx->X[3];
		ks[4] = ctx->X[4];
		ks[5] = ctx->X[5];
		ks[6] = ctx->X[6];
		ks[7] = ctx->X[7];
		ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
		    ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY;

		ts[2] = ts[0] ^ ts[1];

		/* get input block in little-endian format */
		Skein_Get64_LSB_First(w, blkPtr, WCNT);
		DebugSaveTweak(ctx);
		Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts);

		X0 = w[0] + ks[0];	/* do the first full key injection */
		X1 = w[1] + ks[1];
		X2 = w[2] + ks[2];
		X3 = w[3] + ks[3];
		X4 = w[4] + ks[4];
		X5 = w[5] + ks[5] + ts[0];
		X6 = w[6] + ks[6] + ts[1];
		X7 = w[7] + ks[7];

		blkPtr += SKEIN_512_BLOCK_BYTES;

		Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
		    Xptr);
		/* run the rounds */
/* four MIX pairs: add, rotate-left, xor on each word pair */
#define	Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum)		\
	X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0;\
	X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2;\
	X##p4 += X##p5; X##p5 = RotL_64(X##p5, ROT##_2); X##p5 ^= X##p4;\
	X##p6 += X##p7; X##p7 = RotL_64(X##p7, ROT##_3); X##p7 ^= X##p6;

#if	SKEIN_UNROLL_512 == 0
#define	R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum)	/* unrolled */	\
	Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum)		\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr);

#define	I512(R)								\
	X0 += ks[((R) + 1) % 9];	/* inject the key schedule value */ \
	X1 += ks[((R) + 2) % 9];					\
	X2 += ks[((R) + 3) % 9];					\
	X3 += ks[((R) + 4) % 9];					\
	X4 += ks[((R) + 5) % 9];					\
	X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3];			\
	X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3];			\
	X7 += ks[((R) + 8) % 9] + (R) + 1;				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
#else				/* looping version */
#define	R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum)			\
	Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum)		\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr);

#define	I512(R)								\
	X0 += ks[r + (R) + 0];	/* inject the key schedule value */	\
	X1 += ks[r + (R) + 1];						\
	X2 += ks[r + (R) + 2];						\
	X3 += ks[r + (R) + 3];						\
	X4 += ks[r + (R) + 4];						\
	X5 += ks[r + (R) + 5] + ts[r + (R) + 0];			\
	X6 += ks[r + (R) + 6] + ts[r + (R) + 1];			\
	X7 += ks[r + (R) + 7] + r + (R);				\
	ks[r + (R) + 8] = ks[r + (R) - 1];	/* rotate key schedule */ \
	ts[r + (R) + 2] = ts[r + (R) - 1];				\
	Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);

		/* loop thru it */
		for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_512)
#endif				/* end of looped code definitions */
		{
#define	R512_8_rounds(R)	/* do 8 full rounds */			\
		R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1);	\
		R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2);	\
		R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3);	\
		R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4);	\
		I512(2 * (R));						\
		R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5);	\
		R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6);	\
		R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7);	\
		R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8);	\
		I512(2 * (R) + 1);	/* and key injection */

			R512_8_rounds(0);

/*
 * True when round group NN must be emitted: either we are fully
 * unrolled and NN is within the total round count, or the partial
 * unroll factor exceeds NN.
 */
#define	R512_Unroll_R(NN)						\
	((SKEIN_UNROLL_512 == 0 && SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
	(SKEIN_UNROLL_512 > (NN)))

#if	R512_Unroll_R(1)
			R512_8_rounds(1);
#endif
#if	R512_Unroll_R(2)
			R512_8_rounds(2);
#endif
#if	R512_Unroll_R(3)
			R512_8_rounds(3);
#endif
#if	R512_Unroll_R(4)
			R512_8_rounds(4);
#endif
#if	R512_Unroll_R(5)
			R512_8_rounds(5);
#endif
#if	R512_Unroll_R(6)
			R512_8_rounds(6);
#endif
#if	R512_Unroll_R(7)
			R512_8_rounds(7);
#endif
#if	R512_Unroll_R(8)
			R512_8_rounds(8);
#endif
#if	R512_Unroll_R(9)
			R512_8_rounds(9);
#endif
#if	R512_Unroll_R(10)
			R512_8_rounds(10);
#endif
#if	R512_Unroll_R(11)
			R512_8_rounds(11);
#endif
#if	R512_Unroll_R(12)
			R512_8_rounds(12);
#endif
#if	R512_Unroll_R(13)
			R512_8_rounds(13);
#endif
#if	R512_Unroll_R(14)
			R512_8_rounds(14);
#endif
#if	(SKEIN_UNROLL_512 > 14)
#error "need more unrolling in Skein_512_Process_Block"
#endif
		}

		/*
		 * do the final "feedforward" xor, update context chaining vars
		 */
		ctx->X[0] = X0 ^ w[0];
		ctx->X[1] = X1 ^ w[1];
		ctx->X[2] = X2 ^ w[2];
		ctx->X[3] = X3 ^ w[3];
		ctx->X[4] = X4 ^ w[4];
		ctx->X[5] = X5 ^ w[5];
		ctx->X[6] = X6 ^ w[6];
		ctx->X[7] = X7 ^ w[7];
		Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X);

		/* FIRST flag applies only to the first block processed */
		ts[1] &= ~SKEIN_T1_FLAG_FIRST;
	}
	while (--blkCnt);
	ctx->h.T[0] = ts[0];	/* store updated tweak back into context */
	ctx->h.T[1] = ts[1];
}

#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t
Skein_512_Process_Block_CodeSize(void)
{
	/*
	 * Rough code-size estimate: distance between the two function
	 * entry points.  NOTE(review): presumably assumes the linker lays
	 * these functions out contiguously -- confirm before relying on it.
	 */
	return ((uint8_t *)Skein_512_Process_Block_CodeSize) -
	    ((uint8_t *)Skein_512_Process_Block);
}

uint_t
Skein_512_Unroll_Cnt(void)
{
	/* report the compile-time unroll factor chosen via SKEIN_LOOP */
	return (SKEIN_UNROLL_512);
}
#endif
#endif

/* Skein1024 */
#if !(SKEIN_USE_ASM & 1024)
void
Skein1024_Process_Block(Skein1024_Ctxt_t *ctx, const uint8_t *blkPtr,
    size_t blkCnt, size_t byteCntAdd)
{
	/* do it in C, always looping (unrolled is bigger AND slower!)
*/ 476*45818ee1SMatthew Ahrens enum { 477*45818ee1SMatthew Ahrens WCNT = SKEIN1024_STATE_WORDS 478*45818ee1SMatthew Ahrens }; 479*45818ee1SMatthew Ahrens #undef RCNT 480*45818ee1SMatthew Ahrens #define RCNT (SKEIN1024_ROUNDS_TOTAL/8) 481*45818ee1SMatthew Ahrens 482*45818ee1SMatthew Ahrens #ifdef SKEIN_LOOP /* configure how much to unroll the loop */ 483*45818ee1SMatthew Ahrens #define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10) 484*45818ee1SMatthew Ahrens #else 485*45818ee1SMatthew Ahrens #define SKEIN_UNROLL_1024 (0) 486*45818ee1SMatthew Ahrens #endif 487*45818ee1SMatthew Ahrens 488*45818ee1SMatthew Ahrens #if (SKEIN_UNROLL_1024 != 0) 489*45818ee1SMatthew Ahrens #if (RCNT % SKEIN_UNROLL_1024) 490*45818ee1SMatthew Ahrens #error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */ 491*45818ee1SMatthew Ahrens #endif 492*45818ee1SMatthew Ahrens size_t r; 493*45818ee1SMatthew Ahrens /* key schedule words : chaining vars + tweak + "rotation" */ 494*45818ee1SMatthew Ahrens uint64_t kw[WCNT + 4 + RCNT * 2]; 495*45818ee1SMatthew Ahrens #else 496*45818ee1SMatthew Ahrens uint64_t kw[WCNT + 4]; /* key schedule words : chaining vars + tweak */ 497*45818ee1SMatthew Ahrens #endif 498*45818ee1SMatthew Ahrens 499*45818ee1SMatthew Ahrens /* local copy of vars, for speed */ 500*45818ee1SMatthew Ahrens uint64_t X00, X01, X02, X03, X04, X05, X06, X07, X08, X09, X10, X11, 501*45818ee1SMatthew Ahrens X12, X13, X14, X15; 502*45818ee1SMatthew Ahrens uint64_t w[WCNT]; /* local copy of input block */ 503*45818ee1SMatthew Ahrens #ifdef SKEIN_DEBUG 504*45818ee1SMatthew Ahrens /* use for debugging (help compiler put Xn in registers) */ 505*45818ee1SMatthew Ahrens const uint64_t *Xptr[16]; 506*45818ee1SMatthew Ahrens Xptr[0] = &X00; 507*45818ee1SMatthew Ahrens Xptr[1] = &X01; 508*45818ee1SMatthew Ahrens Xptr[2] = &X02; 509*45818ee1SMatthew Ahrens Xptr[3] = &X03; 510*45818ee1SMatthew Ahrens Xptr[4] = &X04; 511*45818ee1SMatthew Ahrens Xptr[5] = &X05; 512*45818ee1SMatthew Ahrens Xptr[6] = &X06; 
513*45818ee1SMatthew Ahrens Xptr[7] = &X07; 514*45818ee1SMatthew Ahrens Xptr[8] = &X08; 515*45818ee1SMatthew Ahrens Xptr[9] = &X09; 516*45818ee1SMatthew Ahrens Xptr[10] = &X10; 517*45818ee1SMatthew Ahrens Xptr[11] = &X11; 518*45818ee1SMatthew Ahrens Xptr[12] = &X12; 519*45818ee1SMatthew Ahrens Xptr[13] = &X13; 520*45818ee1SMatthew Ahrens Xptr[14] = &X14; 521*45818ee1SMatthew Ahrens Xptr[15] = &X15; 522*45818ee1SMatthew Ahrens #endif 523*45818ee1SMatthew Ahrens 524*45818ee1SMatthew Ahrens Skein_assert(blkCnt != 0); /* never call with blkCnt == 0! */ 525*45818ee1SMatthew Ahrens ts[0] = ctx->h.T[0]; 526*45818ee1SMatthew Ahrens ts[1] = ctx->h.T[1]; 527*45818ee1SMatthew Ahrens do { 528*45818ee1SMatthew Ahrens /* 529*45818ee1SMatthew Ahrens * this implementation only supports 2**64 input bytes 530*45818ee1SMatthew Ahrens * (no carry out here) 531*45818ee1SMatthew Ahrens */ 532*45818ee1SMatthew Ahrens ts[0] += byteCntAdd; /* update processed length */ 533*45818ee1SMatthew Ahrens 534*45818ee1SMatthew Ahrens /* precompute the key schedule for this block */ 535*45818ee1SMatthew Ahrens ks[0] = ctx->X[0]; 536*45818ee1SMatthew Ahrens ks[1] = ctx->X[1]; 537*45818ee1SMatthew Ahrens ks[2] = ctx->X[2]; 538*45818ee1SMatthew Ahrens ks[3] = ctx->X[3]; 539*45818ee1SMatthew Ahrens ks[4] = ctx->X[4]; 540*45818ee1SMatthew Ahrens ks[5] = ctx->X[5]; 541*45818ee1SMatthew Ahrens ks[6] = ctx->X[6]; 542*45818ee1SMatthew Ahrens ks[7] = ctx->X[7]; 543*45818ee1SMatthew Ahrens ks[8] = ctx->X[8]; 544*45818ee1SMatthew Ahrens ks[9] = ctx->X[9]; 545*45818ee1SMatthew Ahrens ks[10] = ctx->X[10]; 546*45818ee1SMatthew Ahrens ks[11] = ctx->X[11]; 547*45818ee1SMatthew Ahrens ks[12] = ctx->X[12]; 548*45818ee1SMatthew Ahrens ks[13] = ctx->X[13]; 549*45818ee1SMatthew Ahrens ks[14] = ctx->X[14]; 550*45818ee1SMatthew Ahrens ks[15] = ctx->X[15]; 551*45818ee1SMatthew Ahrens ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ 552*45818ee1SMatthew Ahrens ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ 553*45818ee1SMatthew Ahrens ks[8] ^ ks[9] 
^ ks[10] ^ ks[11] ^ 554*45818ee1SMatthew Ahrens ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY; 555*45818ee1SMatthew Ahrens 556*45818ee1SMatthew Ahrens ts[2] = ts[0] ^ ts[1]; 557*45818ee1SMatthew Ahrens 558*45818ee1SMatthew Ahrens /* get input block in little-endian format */ 559*45818ee1SMatthew Ahrens Skein_Get64_LSB_First(w, blkPtr, WCNT); 560*45818ee1SMatthew Ahrens DebugSaveTweak(ctx); 561*45818ee1SMatthew Ahrens Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts); 562*45818ee1SMatthew Ahrens 563*45818ee1SMatthew Ahrens X00 = w[0] + ks[0]; /* do the first full key injection */ 564*45818ee1SMatthew Ahrens X01 = w[1] + ks[1]; 565*45818ee1SMatthew Ahrens X02 = w[2] + ks[2]; 566*45818ee1SMatthew Ahrens X03 = w[3] + ks[3]; 567*45818ee1SMatthew Ahrens X04 = w[4] + ks[4]; 568*45818ee1SMatthew Ahrens X05 = w[5] + ks[5]; 569*45818ee1SMatthew Ahrens X06 = w[6] + ks[6]; 570*45818ee1SMatthew Ahrens X07 = w[7] + ks[7]; 571*45818ee1SMatthew Ahrens X08 = w[8] + ks[8]; 572*45818ee1SMatthew Ahrens X09 = w[9] + ks[9]; 573*45818ee1SMatthew Ahrens X10 = w[10] + ks[10]; 574*45818ee1SMatthew Ahrens X11 = w[11] + ks[11]; 575*45818ee1SMatthew Ahrens X12 = w[12] + ks[12]; 576*45818ee1SMatthew Ahrens X13 = w[13] + ks[13] + ts[0]; 577*45818ee1SMatthew Ahrens X14 = w[14] + ks[14] + ts[1]; 578*45818ee1SMatthew Ahrens X15 = w[15] + ks[15]; 579*45818ee1SMatthew Ahrens 580*45818ee1SMatthew Ahrens Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL, 581*45818ee1SMatthew Ahrens Xptr); 582*45818ee1SMatthew Ahrens 583*45818ee1SMatthew Ahrens #define Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ 584*45818ee1SMatthew Ahrens pD, pE, pF, ROT, rNum) \ 585*45818ee1SMatthew Ahrens X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0;\ 586*45818ee1SMatthew Ahrens X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2;\ 587*45818ee1SMatthew Ahrens X##p4 += X##p5; X##p5 = RotL_64(X##p5, ROT##_2); X##p5 ^= X##p4;\ 588*45818ee1SMatthew Ahrens X##p6 
+= X##p7; X##p7 = RotL_64(X##p7, ROT##_3); X##p7 ^= X##p6;\ 589*45818ee1SMatthew Ahrens X##p8 += X##p9; X##p9 = RotL_64(X##p9, ROT##_4); X##p9 ^= X##p8;\ 590*45818ee1SMatthew Ahrens X##pA += X##pB; X##pB = RotL_64(X##pB, ROT##_5); X##pB ^= X##pA;\ 591*45818ee1SMatthew Ahrens X##pC += X##pD; X##pD = RotL_64(X##pD, ROT##_6); X##pD ^= X##pC;\ 592*45818ee1SMatthew Ahrens X##pE += X##pF; X##pF = RotL_64(X##pF, ROT##_7); X##pF ^= X##pE; 593*45818ee1SMatthew Ahrens 594*45818ee1SMatthew Ahrens #if SKEIN_UNROLL_1024 == 0 595*45818ee1SMatthew Ahrens #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, \ 596*45818ee1SMatthew Ahrens pE, pF, ROT, rn) \ 597*45818ee1SMatthew Ahrens Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ 598*45818ee1SMatthew Ahrens pD, pE, pF, ROT, rn) \ 599*45818ee1SMatthew Ahrens Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rn, Xptr); 600*45818ee1SMatthew Ahrens 601*45818ee1SMatthew Ahrens #define I1024(R) \ 602*45818ee1SMatthew Ahrens X00 += ks[((R) + 1) % 17]; /* inject the key schedule value */\ 603*45818ee1SMatthew Ahrens X01 += ks[((R) + 2) % 17]; \ 604*45818ee1SMatthew Ahrens X02 += ks[((R) + 3) % 17]; \ 605*45818ee1SMatthew Ahrens X03 += ks[((R) + 4) % 17]; \ 606*45818ee1SMatthew Ahrens X04 += ks[((R) + 5) % 17]; \ 607*45818ee1SMatthew Ahrens X05 += ks[((R) + 6) % 17]; \ 608*45818ee1SMatthew Ahrens X06 += ks[((R) + 7) % 17]; \ 609*45818ee1SMatthew Ahrens X07 += ks[((R) + 8) % 17]; \ 610*45818ee1SMatthew Ahrens X08 += ks[((R) + 9) % 17]; \ 611*45818ee1SMatthew Ahrens X09 += ks[((R) + 10) % 17]; \ 612*45818ee1SMatthew Ahrens X10 += ks[((R) + 11) % 17]; \ 613*45818ee1SMatthew Ahrens X11 += ks[((R) + 12) % 17]; \ 614*45818ee1SMatthew Ahrens X12 += ks[((R) + 13) % 17]; \ 615*45818ee1SMatthew Ahrens X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \ 616*45818ee1SMatthew Ahrens X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \ 617*45818ee1SMatthew Ahrens X15 += ks[((R) + 16) % 17] + (R) +1; \ 618*45818ee1SMatthew Ahrens 
Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); 619*45818ee1SMatthew Ahrens #else /* looping version */ 620*45818ee1SMatthew Ahrens #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, \ 621*45818ee1SMatthew Ahrens pE, pF, ROT, rn) \ 622*45818ee1SMatthew Ahrens Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ 623*45818ee1SMatthew Ahrens pD, pE, pF, ROT, rn) \ 624*45818ee1SMatthew Ahrens Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rn, Xptr); 625*45818ee1SMatthew Ahrens 626*45818ee1SMatthew Ahrens #define I1024(R) \ 627*45818ee1SMatthew Ahrens X00 += ks[r + (R) + 0]; /* inject the key schedule value */ \ 628*45818ee1SMatthew Ahrens X01 += ks[r + (R) + 1]; \ 629*45818ee1SMatthew Ahrens X02 += ks[r + (R) + 2]; \ 630*45818ee1SMatthew Ahrens X03 += ks[r + (R) + 3]; \ 631*45818ee1SMatthew Ahrens X04 += ks[r + (R) + 4]; \ 632*45818ee1SMatthew Ahrens X05 += ks[r + (R) + 5]; \ 633*45818ee1SMatthew Ahrens X06 += ks[r + (R) + 6]; \ 634*45818ee1SMatthew Ahrens X07 += ks[r + (R) + 7]; \ 635*45818ee1SMatthew Ahrens X08 += ks[r + (R) + 8]; \ 636*45818ee1SMatthew Ahrens X09 += ks[r + (R) + 9]; \ 637*45818ee1SMatthew Ahrens X10 += ks[r + (R) + 10]; \ 638*45818ee1SMatthew Ahrens X11 += ks[r + (R) + 11]; \ 639*45818ee1SMatthew Ahrens X12 += ks[r + (R) + 12]; \ 640*45818ee1SMatthew Ahrens X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \ 641*45818ee1SMatthew Ahrens X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \ 642*45818ee1SMatthew Ahrens X15 += ks[r + (R) + 15] + r + (R); \ 643*45818ee1SMatthew Ahrens ks[r + (R) + 16] = ks[r + (R) - 1]; /* rotate key schedule */\ 644*45818ee1SMatthew Ahrens ts[r + (R) + 2] = ts[r + (R) - 1]; \ 645*45818ee1SMatthew Ahrens Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); 646*45818ee1SMatthew Ahrens 647*45818ee1SMatthew Ahrens /* loop thru it */ 648*45818ee1SMatthew Ahrens for (r = 1; r <= 2 * RCNT; r += 2 * SKEIN_UNROLL_1024) 649*45818ee1SMatthew Ahrens #endif 650*45818ee1SMatthew Ahrens { 
651*45818ee1SMatthew Ahrens #define R1024_8_rounds(R) /* do 8 full rounds */ \ 652*45818ee1SMatthew Ahrens R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, \ 653*45818ee1SMatthew Ahrens 14, 15, R1024_0, 8 * (R) + 1); \ 654*45818ee1SMatthew Ahrens R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, \ 655*45818ee1SMatthew Ahrens 08, 01, R1024_1, 8 * (R) + 2); \ 656*45818ee1SMatthew Ahrens R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, \ 657*45818ee1SMatthew Ahrens 10, 09, R1024_2, 8 * (R) + 3); \ 658*45818ee1SMatthew Ahrens R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, \ 659*45818ee1SMatthew Ahrens 12, 07, R1024_3, 8 * (R) + 4); \ 660*45818ee1SMatthew Ahrens I1024(2 * (R)); \ 661*45818ee1SMatthew Ahrens R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, \ 662*45818ee1SMatthew Ahrens 14, 15, R1024_4, 8 * (R) + 5); \ 663*45818ee1SMatthew Ahrens R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, \ 664*45818ee1SMatthew Ahrens 08, 01, R1024_5, 8 * (R) + 6); \ 665*45818ee1SMatthew Ahrens R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, \ 666*45818ee1SMatthew Ahrens 10, 09, R1024_6, 8 * (R) + 7); \ 667*45818ee1SMatthew Ahrens R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, \ 668*45818ee1SMatthew Ahrens 12, 07, R1024_7, 8 * (R) + 8); \ 669*45818ee1SMatthew Ahrens I1024(2 * (R) + 1); 670*45818ee1SMatthew Ahrens 671*45818ee1SMatthew Ahrens R1024_8_rounds(0); 672*45818ee1SMatthew Ahrens 673*45818ee1SMatthew Ahrens #define R1024_Unroll_R(NN) \ 674*45818ee1SMatthew Ahrens ((SKEIN_UNROLL_1024 == 0 && SKEIN1024_ROUNDS_TOTAL/8 > (NN)) || \ 675*45818ee1SMatthew Ahrens (SKEIN_UNROLL_1024 > (NN))) 676*45818ee1SMatthew Ahrens 677*45818ee1SMatthew Ahrens #if R1024_Unroll_R(1) 678*45818ee1SMatthew Ahrens R1024_8_rounds(1); 679*45818ee1SMatthew Ahrens #endif 680*45818ee1SMatthew Ahrens #if R1024_Unroll_R(2) 681*45818ee1SMatthew Ahrens R1024_8_rounds(2); 682*45818ee1SMatthew Ahrens #endif 
683*45818ee1SMatthew Ahrens #if R1024_Unroll_R(3) 684*45818ee1SMatthew Ahrens R1024_8_rounds(3); 685*45818ee1SMatthew Ahrens #endif 686*45818ee1SMatthew Ahrens #if R1024_Unroll_R(4) 687*45818ee1SMatthew Ahrens R1024_8_rounds(4); 688*45818ee1SMatthew Ahrens #endif 689*45818ee1SMatthew Ahrens #if R1024_Unroll_R(5) 690*45818ee1SMatthew Ahrens R1024_8_rounds(5); 691*45818ee1SMatthew Ahrens #endif 692*45818ee1SMatthew Ahrens #if R1024_Unroll_R(6) 693*45818ee1SMatthew Ahrens R1024_8_rounds(6); 694*45818ee1SMatthew Ahrens #endif 695*45818ee1SMatthew Ahrens #if R1024_Unroll_R(7) 696*45818ee1SMatthew Ahrens R1024_8_rounds(7); 697*45818ee1SMatthew Ahrens #endif 698*45818ee1SMatthew Ahrens #if R1024_Unroll_R(8) 699*45818ee1SMatthew Ahrens R1024_8_rounds(8); 700*45818ee1SMatthew Ahrens #endif 701*45818ee1SMatthew Ahrens #if R1024_Unroll_R(9) 702*45818ee1SMatthew Ahrens R1024_8_rounds(9); 703*45818ee1SMatthew Ahrens #endif 704*45818ee1SMatthew Ahrens #if R1024_Unroll_R(10) 705*45818ee1SMatthew Ahrens R1024_8_rounds(10); 706*45818ee1SMatthew Ahrens #endif 707*45818ee1SMatthew Ahrens #if R1024_Unroll_R(11) 708*45818ee1SMatthew Ahrens R1024_8_rounds(11); 709*45818ee1SMatthew Ahrens #endif 710*45818ee1SMatthew Ahrens #if R1024_Unroll_R(12) 711*45818ee1SMatthew Ahrens R1024_8_rounds(12); 712*45818ee1SMatthew Ahrens #endif 713*45818ee1SMatthew Ahrens #if R1024_Unroll_R(13) 714*45818ee1SMatthew Ahrens R1024_8_rounds(13); 715*45818ee1SMatthew Ahrens #endif 716*45818ee1SMatthew Ahrens #if R1024_Unroll_R(14) 717*45818ee1SMatthew Ahrens R1024_8_rounds(14); 718*45818ee1SMatthew Ahrens #endif 719*45818ee1SMatthew Ahrens #if (SKEIN_UNROLL_1024 > 14) 720*45818ee1SMatthew Ahrens #error "need more unrolling in Skein_1024_Process_Block" 721*45818ee1SMatthew Ahrens #endif 722*45818ee1SMatthew Ahrens } 723*45818ee1SMatthew Ahrens /* 724*45818ee1SMatthew Ahrens * do the final "feedforward" xor, update context chaining vars 725*45818ee1SMatthew Ahrens */ 726*45818ee1SMatthew Ahrens 
		ctx->X[0] = X00 ^ w[0];
		ctx->X[1] = X01 ^ w[1];
		ctx->X[2] = X02 ^ w[2];
		ctx->X[3] = X03 ^ w[3];
		ctx->X[4] = X04 ^ w[4];
		ctx->X[5] = X05 ^ w[5];
		ctx->X[6] = X06 ^ w[6];
		ctx->X[7] = X07 ^ w[7];
		ctx->X[8] = X08 ^ w[8];
		ctx->X[9] = X09 ^ w[9];
		ctx->X[10] = X10 ^ w[10];
		ctx->X[11] = X11 ^ w[11];
		ctx->X[12] = X12 ^ w[12];
		ctx->X[13] = X13 ^ w[13];
		ctx->X[14] = X14 ^ w[14];
		ctx->X[15] = X15 ^ w[15];

		Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X);

		/*
		 * Clear the "first block" tweak flag so all subsequent
		 * blocks in this call are processed as non-first blocks.
		 */
		ts[1] &= ~SKEIN_T1_FLAG_FIRST;
		/* advance to the next 1024-bit input block */
		blkPtr += SKEIN1024_BLOCK_BYTES;
	} while (--blkCnt);
	/* write the updated tweak words back into the context */
	ctx->h.T[0] = ts[0];
	ctx->h.T[1] = ts[1];
}

#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/*
 * Return an estimate of the compiled code size of the 1024-bit block
 * function, measured as the byte distance between the two function
 * addresses.
 * NOTE(review): this assumes the toolchain places this function
 * immediately after Skein1024_Process_Block in the text segment; it is
 * a build-time measurement aid only, not portable C -- confirm layout
 * before relying on the value.
 */
size_t
Skein1024_Process_Block_CodeSize(void)
{
	return ((uint8_t *)Skein1024_Process_Block_CodeSize) -
	    ((uint8_t *)Skein1024_Process_Block);
}

/* Return the compile-time loop-unroll count for the 1024-bit block code. */
uint_t
Skein1024_Unroll_Cnt(void)
{
	return (SKEIN_UNROLL_1024);
}
#endif	/* SKEIN_CODE_SIZE || SKEIN_PERF */
#endif	/* !(SKEIN_USE_ASM & 1024) */