/*-
 * Copyright 2009 Colin Percival
 * Copyright 2012,2013 Alexander Peslyak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file was originally written by Colin Percival as part of the Tarsnap
 * online backup system.
 */

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "private/common.h"
#include "private/sse2_64_32.h"

#ifdef HAVE_EMMINTRIN_H

# ifdef __GNUC__
#  pragma GCC target("sse2")
# endif
# include <emmintrin.h>
# if defined(__XOP__) && defined(DISABLED)
#  include <x86intrin.h>
# endif

# include "../crypto_scrypt.h"
# include "../pbkdf2-sha256.h"

# if defined(__XOP__) && defined(DISABLED)
#  define ARX(out, in1, in2, s) \
    out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
# else
#  define ARX(out, in1, in2, s)                              \
    {                                                        \
        __m128i T = _mm_add_epi32(in1, in2);                 \
        out = _mm_xor_si128(out, _mm_slli_epi32(T, s));      \
        out = _mm_xor_si128(out, _mm_srli_epi32(T, 32 - s)); \
    }
# endif
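
/*
 * ARX is the add-rotate-xor step of the Salsa20 quarterround. SSE2 has no
 * 32-bit vector rotate, so the generic path emulates rotate-left by s with
 * two shifts (left by s, right by 32 - s) XORed into out; the XOP path
 * uses the native _mm_roti_epi32 rotate instead.
 */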

# define SALSA20_2ROUNDS                  \
    /* Operate on "columns". */           \
    ARX(X1, X0, X3, 7)                    \
    ARX(X2, X1, X0, 9)                    \
    ARX(X3, X2, X1, 13)                   \
    ARX(X0, X3, X2, 18)                   \
                                          \
    /* Rearrange data. */                 \
    X1 = _mm_shuffle_epi32(X1, 0x93);     \
    X2 = _mm_shuffle_epi32(X2, 0x4E);     \
    X3 = _mm_shuffle_epi32(X3, 0x39);     \
                                          \
    /* Operate on "rows". */              \
    ARX(X3, X0, X1, 7)                    \
    ARX(X2, X3, X0, 9)                    \
    ARX(X1, X2, X3, 13)                   \
    ARX(X0, X1, X2, 18)                   \
                                          \
    /* Rearrange data. */                 \
    X1 = _mm_shuffle_epi32(X1, 0x39);     \
    X2 = _mm_shuffle_epi32(X2, 0x4E);     \
    X3 = _mm_shuffle_epi32(X3, 0x93);
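
/*
 * The shuffle immediates rotate the 32-bit lanes of a register: 0x93
 * rotates left by one lane, 0x4E swaps the two halves, and 0x39 rotates
 * right by one lane. The first set of shuffles realigns the lanes so the
 * "row" half-round can reuse the same ARX pattern as the "column"
 * half-round; the second set restores the original lane order.
 */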

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) xor
 * (in)[0 ... 3], write the result to (out)[0 ... 3], and keep it in
 * (X0 ... X3).
 */
# define SALSA20_8_XOR(in, out)                       \
    {                                                 \
        __m128i Y0 = X0 = _mm_xor_si128(X0, (in)[0]); \
        __m128i Y1 = X1 = _mm_xor_si128(X1, (in)[1]); \
        __m128i Y2 = X2 = _mm_xor_si128(X2, (in)[2]); \
        __m128i Y3 = X3 = _mm_xor_si128(X3, (in)[3]); \
        SALSA20_2ROUNDS                               \
        SALSA20_2ROUNDS                               \
        SALSA20_2ROUNDS                               \
        SALSA20_2ROUNDS                               \
        (out)[0] = X0 = _mm_add_epi32(X0, Y0);        \
        (out)[1] = X1 = _mm_add_epi32(X1, Y1);        \
        (out)[2] = X2 = _mm_add_epi32(X2, Y2);        \
        (out)[3] = X3 = _mm_add_epi32(X3, Y3);        \
    }
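
/*
 * salsa20/8 is four of these double-rounds; the trailing additions are
 * Salsa20's feed-forward, adding the original input (saved in Y0 ... Y3)
 * back into the permuted state.
 */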

/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static inline void
blockmix_salsa8(const __m128i *Bin, __m128i *Bout, size_t r)
{
    __m128i X0, X1, X2, X3;
    size_t  i;

    /* 1: X <-- B_{2r - 1} */
    X0 = Bin[8 * r - 4];
    X1 = Bin[8 * r - 3];
    X2 = Bin[8 * r - 2];
    X3 = Bin[8 * r - 1];

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR(Bin, Bout)

    /* 2: for i = 0 to 2r - 1 do */
    r--;
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR(&Bin[i * 8 + 4], &Bout[(r + i) * 4 + 4])

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR(&Bin[i * 8], &Bout[i * 4])
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR(&Bin[i * 8 + 4], &Bout[(r + i) * 4 + 4])
}
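
/*
 * The indexing above writes even-numbered Y sub-blocks to the first half
 * of Bout and odd-numbered ones to the second half, producing the permuted
 * layout (Y_0, Y_2, ..., Y_{2r-2}, Y_1, Y_3, ..., Y_{2r-1}) in a single
 * pass with no separate shuffle step.
 */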

# define XOR4(in)                    \
    X0 = _mm_xor_si128(X0, (in)[0]); \
    X1 = _mm_xor_si128(X1, (in)[1]); \
    X2 = _mm_xor_si128(X2, (in)[2]); \
    X3 = _mm_xor_si128(X3, (in)[3]);

# define XOR4_2(in1, in2)                   \
    X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
    X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
    X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
    X3 = _mm_xor_si128((in1)[3], (in2)[3]);

static inline uint32_t
blockmix_salsa8_xor(const __m128i *Bin1, const __m128i *Bin2, __m128i *Bout,
                    size_t r)
{
    __m128i X0, X1, X2, X3;
    size_t  i;

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(&Bin1[8 * r - 4], &Bin2[8 * r - 4])

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1)
    SALSA20_8_XOR(Bin2, Bout)

    /* 2: for i = 0 to 2r - 1 do */
    r--;
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(&Bin1[i * 8 + 4])
        SALSA20_8_XOR(&Bin2[i * 8 + 4], &Bout[(r + i) * 4 + 4])

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(&Bin1[i * 8])
        SALSA20_8_XOR(&Bin2[i * 8], &Bout[i * 4])
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(&Bin1[i * 8 + 4])
    SALSA20_8_XOR(&Bin2[i * 8 + 4], &Bout[(r + i) * 4 + 4])

    return _mm_cvtsi128_si32(X0);
}
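
/*
 * blockmix_salsa8_xor computes BlockMix over Bin1 xor Bin2 and, as a side
 * effect, returns the low 32 bits of X0, i.e. the first word of the last
 * output sub-block. The caller can use this as Integerify(X) without
 * reloading the block from memory.
 */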

# undef ARX
# undef SALSA20_2ROUNDS
# undef SALSA20_8_XOR
# undef XOR4
# undef XOR4_2

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 * Note that B's layout is permuted compared to the generic implementation.
 */
static inline uint32_t
integerify(const void *B, size_t r)
{
    return *(const uint32_t *) ((uintptr_t)(B) + (2 * r - 1) * 64);
}
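
/*
 * The permuted layout produced by blockmix_salsa8 still places Y_{2r-1} in
 * the last 64-byte sub-block, so B_{2r-1} sits at byte offset
 * (2r - 1) * 64 just as in the generic layout; since N is a power of two
 * no larger than 2^32, only the first 32-bit word is needed.
 */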

/**
 * smix(B, r, N, V, XY):
 * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
 * the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length. The value N must be a
 * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
 * multiple of 64 bytes.
 */
static void
smix(uint8_t *B, size_t r, uint32_t N, void *V, void *XY)
{
    size_t    s = 128 * r;
    __m128i  *X = (__m128i *) V, *Y;
    uint32_t *X32 = (uint32_t *) V;
    uint32_t  i, j;
    size_t    k;

    /* 1: X <-- B */
    /* 3: V_i <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X32[k * 16 + i] = LOAD32_LE(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    /* 2: for i = 0 to N - 1 do */
    for (i = 1; i < N - 1; i += 2) {
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = (__m128i *) ((uintptr_t)(V) + i * s);
        blockmix_salsa8(X, Y, r);

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        X = (__m128i *) ((uintptr_t)(V) + (i + 1) * s);
        blockmix_salsa8(Y, X, r);
    }

    /* 4: X <-- H(X) */
    /* 3: V_i <-- X */
    Y = (__m128i *) ((uintptr_t)(V) + i * s);
    blockmix_salsa8(X, Y, r);

    /* 4: X <-- H(X) */
    /* 3: V_i <-- X */
    X = (__m128i *) XY;
    blockmix_salsa8(Y, X, r);

    X32 = (uint32_t *) XY;
    Y   = (__m128i *) ((uintptr_t)(XY) + s);

    /* 7: j <-- Integerify(X) mod N */
    j = integerify(X, r) & (N - 1);

    /* 6: for i = 0 to N - 1 do */
    for (i = 0; i < N; i += 2) {
        __m128i *V_j = (__m128i *) ((uintptr_t)(V) + j * s);

        /* 8: X <-- H(X \xor V_j) */
        /* 7: j <-- Integerify(X) mod N */
        j   = blockmix_salsa8_xor(X, V_j, Y, r) & (N - 1);
        V_j = (__m128i *) ((uintptr_t)(V) + j * s);

        /* 8: X <-- H(X \xor V_j) */
        /* 7: j <-- Integerify(X) mod N */
        j = blockmix_salsa8_xor(Y, V_j, X, r) & (N - 1);
    }

    /* 10: B' <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            STORE32_LE(&B[(k * 16 + (i * 5 % 16)) * 4], X32[k * 16 + i]);
        }
    }
}
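
/*
 * smix fills V with N sequentially-hashed blocks, then performs N
 * data-dependent reads, each time picking V_j from Integerify(X) mod N
 * (N is a power of two, so the mod reduces to a mask). The i * 5 % 16
 * index in the copy loops reorders the 16 words of each 64-byte sub-block
 * into the permuted form the SIMD rounds operate on, and back again at
 * the end. Iterations are unrolled two at a time so X and Y can ping-pong
 * without copies.
 */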

/**
 * escrypt_kdf_sse(local, passwd, passwdlen, salt, saltlen,
 *                 N, r, p, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen) and write the result into buf. The parameters r, p, and buflen
 * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
 * must be a power of 2 greater than 1.
 *
 * Return 0 on success; or -1 on error.
 */
int
escrypt_kdf_sse(escrypt_local_t *local, const uint8_t *passwd, size_t passwdlen,
                const uint8_t *salt, size_t saltlen, uint64_t N, uint32_t _r,
                uint32_t _p, uint8_t *buf, size_t buflen)
{
    size_t    B_size, V_size, XY_size, need;
    uint8_t  *B;
    uint32_t *V, *XY;
    size_t    r = _r, p = _p;
    uint32_t  i;

    /* Sanity-check parameters. */
# if SIZE_MAX > UINT32_MAX
    /* LCOV_EXCL_START */
    if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
        errno = EFBIG;
        return -1;
    }
    /* LCOV_EXCL_END */
# endif
    if ((uint64_t)(r) * (uint64_t)(p) >= ((uint64_t) 1 << 30)) {
        errno = EFBIG;
        return -1;
    }
    if (N > UINT32_MAX) {
        errno = EFBIG;
        return -1;
    }
    if (((N & (N - 1)) != 0) || (N < 2)) {
        errno = EINVAL;
        return -1;
    }
    if (r == 0 || p == 0) {
        errno = EINVAL;
        return -1;
    }
    /* LCOV_EXCL_START */
    if ((r > SIZE_MAX / 128 / p) ||
# if SIZE_MAX / 256 <= UINT32_MAX
        (r > SIZE_MAX / 256) ||
# endif
        (N > SIZE_MAX / 128 / r)) {
        errno = ENOMEM;
        return -1;
    }
    /* LCOV_EXCL_END */

    /* Allocate memory. */
    B_size = (size_t) 128 * r * p;
    V_size = (size_t) 128 * r * N;
    need   = B_size + V_size;
    /* LCOV_EXCL_START */
    if (need < V_size) {
        errno = ENOMEM;
        return -1;
    }
    /* LCOV_EXCL_END */
    XY_size = (size_t) 256 * r + 64;
    need += XY_size;
    /* LCOV_EXCL_START */
    if (need < XY_size) {
        errno = ENOMEM;
        return -1;
    }
    /* LCOV_EXCL_END */
    if (local->size < need) {
        if (free_region(local)) {
            return -1; /* LCOV_EXCL_LINE */
        }
        if (!alloc_region(local, need)) {
            return -1; /* LCOV_EXCL_LINE */
        }
    }
    B  = (uint8_t *) local->aligned;
    V  = (uint32_t *) ((uint8_t *) B + B_size);
    XY = (uint32_t *) ((uint8_t *) V + V_size);
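
    /*
     * Layout within the single aligned region: the p * 128r-byte B buffer,
     * followed by the 128rN-byte V table, followed by the 256r + 64-byte
     * XY scratch area. The checks above guarantee that need did not wrap
     * around while summing the three sizes.
     */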

    /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
    PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

    /* 2: for i = 0 to p - 1 do */
    for (i = 0; i < p; i++) {
        /* 3: B_i <-- MF(B_i, N) */
        smix(&B[(size_t) 128 * i * r], r, (uint32_t) N, V, XY);
    }

    /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
    PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

    /* Success! */
    return 0;
}
#endif