/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include "internal/endian.h"
#include "crypto/modes.h"

/*
 * On platforms that tolerate misaligned accesses the 16-byte blocks are
 * read/written as pairs of u64.  The aligned(1) attribute tells GCC/Clang
 * that the pointer may be unaligned so the compiler emits safe loads and
 * stores; elsewhere the STRICT_ALIGNMENT path below uses memcpy instead.
 */
#ifndef STRICT_ALIGNMENT
# ifdef __GNUC__
typedef u64 u64_a1 __attribute((__aligned__(1)));
# else
typedef u64 u64_a1;
# endif
#endif

/*
 * XTS en/decryption with the GB-style tweak schedule.
 *
 * Encrypts (enc != 0) or decrypts (enc == 0) |len| bytes from |inp| to
 * |out| in XTS mode.  ctx->block1/key1 perform the data en/decryption and
 * ctx->block2/key2 encrypt the initial tweak; the caller selects the
 * direction by which primitive it installs as block1.  |inp| and |out| may
 * alias (each block is staged through |scratch|).
 *
 * Unlike IEEE P1619 XTS (which multiplies the tweak by x using a left
 * shift reduced with 0x87 in the low byte), this GB variant uses the
 * bit-reversed convention: the tweak, viewed as a 128-bit big-endian
 * value, is shifted RIGHT by one bit and reduced by XORing 0xe1 into the
 * most-significant byte when a bit falls off the low end.
 *
 * Partial trailing blocks are handled with ciphertext stealing, so |len|
 * must be at least 16.
 *
 * Returns 0 on success, -1 if len < 16.
 */
int ossl_crypto_xts128gb_encrypt(const XTS128_CONTEXT *ctx,
                                 const unsigned char iv[16],
                                 const unsigned char *inp, unsigned char *out,
                                 size_t len, int enc)
{
    DECLARE_IS_ENDIAN;
    union {
        u64 u[2];
        u32 d[4];
        u8 c[16];
    } tweak, scratch;
    unsigned int i;

    /* XTS needs at least one full 16-byte block (for ciphertext stealing) */
    if (len < 16)
        return -1;

    memcpy(tweak.c, iv, 16);

    /* Initial tweak T = E_{key2}(iv) */
    (*ctx->block2) (tweak.c, tweak.c, ctx->key2);

    /*
     * When decrypting a non-multiple-of-16 length, hold back the final
     * full block: it must be processed with the *next* tweak value and is
     * needed for ciphertext stealing (see the tail handling below).
     */
    if (!enc && (len % 16))
        len -= 16;

    while (len >= 16) {
        /* scratch = block1(input ^ T) ^ T */
#if defined(STRICT_ALIGNMENT)
        memcpy(scratch.c, inp, 16);
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
#else
        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak.u[0];
        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak.u[1];
#endif
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
#if defined(STRICT_ALIGNMENT)
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        memcpy(out, scratch.c, 16);
#else
        /* note: scratch keeps the XORed result, used by the CTS tail */
        ((u64_a1 *)out)[0] = scratch.u[0] ^= tweak.u[0];
        ((u64_a1 *)out)[1] = scratch.u[1] ^= tweak.u[1];
#endif
        inp += 16;
        out += 16;
        len -= 16;

        if (len == 0)
            return 0;

        /*
         * Advance the tweak for the next block: multiply by x in
         * GF(2^128) using the bit-reversed (GB) convention — shift the
         * 128-bit big-endian value right by one and fold 0xe1 into the
         * top byte if the shifted-out bit was set.
         */
        if (IS_LITTLE_ENDIAN) {
            u8 res;
            u64 hi, lo;
#ifdef BSWAP8
            hi = BSWAP8(tweak.u[0]);
            lo = BSWAP8(tweak.u[1]);
#else
            u8 *p = tweak.c;

            hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
            lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
            /* res = bit shifted out of the low end of (hi:lo) */
            res = (u8)lo & 1;
            /*
             * Shifted value held temporarily in host order with the low
             * half in u[0] and the high half in u[1]; on a little-endian
             * host c[15] is then the most-significant byte of hi >> 1,
             * i.e. the logical top byte that takes the 0xe1 reduction.
             */
            tweak.u[0] = (lo >> 1) | (hi << 63);
            tweak.u[1] = hi >> 1;
            if (res)
                tweak.c[15] ^= 0xe1;
            /* swap the result back into big-endian memory order */
#ifdef BSWAP8
            hi = BSWAP8(tweak.u[0]);
            lo = BSWAP8(tweak.u[1]);
#else
            p = tweak.c;

            hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
            lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
            tweak.u[0] = lo;
            tweak.u[1] = hi;
        } else {
            /*
             * Big-endian host: byte-wise right shift with carry, each
             * byte's LSB becoming the next byte's MSB.
             */
            u8 carry, res;
            carry = 0;
            for (i = 0; i < 16; ++i) {
                res = (tweak.c[i] << 7) & 0x80;
                tweak.c[i] = ((tweak.c[i] >> 1) + carry) & 0xff;
                carry = res;
            }
            /* res now holds the bit shifted out of byte 15 (the low end) */
            if (res)
                tweak.c[0] ^= 0xe1;
        }
    }
    /*
     * Ciphertext stealing for the trailing partial block (len is now the
     * partial length, 1..15; scratch still holds the last full block's
     * output from the loop above).
     */
    if (enc) {
        /*
         * Emit the first len bytes of the last full ciphertext block as
         * the (short) final output block, and splice the partial
         * plaintext into scratch in their place.
         */
        for (i = 0; i < len; ++i) {
            u8 c = inp[i];
            out[i] = scratch.c[i];
            scratch.c[i] = c;
        }
        /* re-encrypt the merged block with the advanced tweak */
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        /* overwrite the previously written block with the final one */
        memcpy(out - 16, scratch.c, 16);
    } else {
        union {
            u64 u[2];
            u8 c[16];
        } tweak1;

        /*
         * Decryption needs the tweak one step further along (tweak1) for
         * the held-back full ciphertext block; compute tweak * x exactly
         * as in the loop above.
         */
        if (IS_LITTLE_ENDIAN) {
            u8 res;
            u64 hi, lo;
#ifdef BSWAP8
            hi = BSWAP8(tweak.u[0]);
            lo = BSWAP8(tweak.u[1]);
#else
            u8 *p = tweak.c;

            hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
            lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
            res = (u8)lo & 1;
            tweak1.u[0] = (lo >> 1) | (hi << 63);
            tweak1.u[1] = hi >> 1;
            if (res)
                tweak1.c[15] ^= 0xe1;
#ifdef BSWAP8
            hi = BSWAP8(tweak1.u[0]);
            lo = BSWAP8(tweak1.u[1]);
#else
            p = tweak1.c;

            hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
            lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
            tweak1.u[0] = lo;
            tweak1.u[1] = hi;
        } else {
            u8 carry, res;
            carry = 0;
            for (i = 0; i < 16; ++i) {
                res = (tweak.c[i] << 7) & 0x80;
                tweak1.c[i] = ((tweak.c[i] >> 1) + carry) & 0xff;
                carry = res;
            }
            if (res)
                tweak1.c[0] ^= 0xe1;
        }
        /* decrypt the held-back full ciphertext block with tweak1 */
#if defined(STRICT_ALIGNMENT)
        memcpy(scratch.c, inp, 16);
        scratch.u[0] ^= tweak1.u[0];
        scratch.u[1] ^= tweak1.u[1];
#else
        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak1.u[0];
        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak1.u[1];
#endif
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
        scratch.u[0] ^= tweak1.u[0];
        scratch.u[1] ^= tweak1.u[1];

        /*
         * The recovered plaintext's first len bytes belong to the final
         * short output block; swap in the partial ciphertext bytes to
         * rebuild the stolen block.
         */
        for (i = 0; i < len; ++i) {
            u8 c = inp[16 + i];
            out[16 + i] = scratch.c[i];
            scratch.c[i] = c;
        }
        /* decrypt the reassembled block with the original tweak */
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
#if defined(STRICT_ALIGNMENT)
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        memcpy(out, scratch.c, 16);
#else
        ((u64_a1 *)out)[0] = scratch.u[0] ^ tweak.u[0];
        ((u64_a1 *)out)[1] = scratch.u[1] ^ tweak.u[1];
#endif
    }

    return 0;
}