/*
 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "inner.h"

/*
 * We compute "carryless multiplications" through normal integer
 * multiplications, masking out enough bits to create "holes" in which
 * carries may expand without altering our data bits; we use only 8
 * data bits per 32-bit word, one every fourth bit. The accumulated
 * value in each 4-bit group cannot exceed 8, which fits in the
 * available 4 bits.
 *
 * It would be possible to use a 3-bit spacing, allowing two operands,
 * one with 7 non-zero data bits, the other with 10 or 11 non-zero
 * data bits; this asymmetric split makes the overall code more
 * complex, with thresholds and exceptions, and does not appear to be
 * worth the effort.
 */
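
/*
 * Illustration (informal): masking a 32-bit word with 0x11111111
 * keeps the bits at positions 0, 4, 8,..., 28. The plain integer
 * product of two such masked words computes, at each result position
 * that is a multiple of 4, a sum of at most 8 one-bit products; that
 * sum is at most 8, so it never spills into the next data bit four
 * positions up. Masking the 64-bit product with 0x1111111111111111
 * then keeps, at each data position, the parity of that sum, which
 * is exactly the bit a carryless multiplication would have produced.
 */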

/*
 * We cannot really autodetect whether multiplications are "slow" or
 * not. A typical example is the ARM Cortex M0+, which exists in two
 * versions: one with a 1-cycle multiplication opcode, the other with
 * a 32-cycle multiplication opcode. Both use exactly the same
 * architecture and ABI, and cannot be distinguished from each other
 * at compile-time.
 *
 * Since most modern CPUs (even embedded ones) still have fast
 * multiplications, we use the "fast mul" code by default.
 */

#if BR_SLOW_MUL

/*
 * This implementation uses Karatsuba-like reduction to perform fewer
 * integer multiplications (9 instead of 16), at the expense of extra
 * logical operations (XOR, shifts...). On modern x86 CPUs that offer
 * fast, pipelined multiplications, this code is about twice as slow
 * as the simpler code with 16 multiplications; the situation may be
 * reversed on low-end platforms where multiplications are expensive.
 */
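
/*
 * Reminder of the identity used (over GF(2)[X], where addition is
 * XOR): for A = A0 + W*A1 and B = B0 + W*B1,
 *
 *   A*B = A0*B0 + W*((A0 + A1)*(B0 + B1) + A0*B0 + A1*B1)
 *       + (W^2)*(A1*B1)
 *
 * i.e. three half-size multiplications instead of four. It is applied
 * below at two levels: the operand bits are split into four classes
 * by bit position modulo 4, with W standing for a 1-bit shift, which
 * turns the 16 word multiplications of the direct method into 9.
 */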

#define MUL32(h, l, x, y) do { \
		uint64_t mul32tmp = MUL(x, y); \
		(h) = (uint32_t)(mul32tmp >> 32); \
		(l) = (uint32_t)mul32tmp; \
	} while (0)

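/*
 * MUL() is defined in inner.h; as its use in MUL32() above shows, it
 * returns the full 64-bit integer product of two 32-bit words.
 */
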
static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t x0, x1, x2, x3;
	uint32_t y0, y1, y2, y3;
	uint32_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
	uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8;

	x0 = x & (uint32_t)0x11111111;
	x1 = x & (uint32_t)0x22222222;
	x2 = x & (uint32_t)0x44444444;
	x3 = x & (uint32_t)0x88888888;
	y0 = y & (uint32_t)0x11111111;
	y1 = y & (uint32_t)0x22222222;
	y2 = y & (uint32_t)0x44444444;
	y3 = y & (uint32_t)0x88888888;

	/*
	 * (x0+W*x1)*(y0+W*y1) -> a0:b0
	 * (x2+W*x3)*(y2+W*y3) -> a3:b3
	 * ((x0+x2)+W*(x1+x3))*((y0+y2)+W*(y1+y3)) -> a6:b6
	 */
	a0 = x0;
	b0 = y0;
	a1 = x1 >> 1;
	b1 = y1 >> 1;
	a2 = a0 ^ a1;
	b2 = b0 ^ b1;
	a3 = x2 >> 2;
	b3 = y2 >> 2;
	a4 = x3 >> 3;
	b4 = y3 >> 3;
	a5 = a3 ^ a4;
	b5 = b3 ^ b4;
	a6 = a0 ^ a3;
	b6 = b0 ^ b3;
	a7 = a1 ^ a4;
	b7 = b1 ^ b4;
	a8 = a6 ^ a7;
	b8 = b6 ^ b7;

	MUL32(b0, a0, b0, a0);
	MUL32(b1, a1, b1, a1);
	MUL32(b2, a2, b2, a2);
	MUL32(b3, a3, b3, a3);
	MUL32(b4, a4, b4, a4);
	MUL32(b5, a5, b5, a5);
	MUL32(b6, a6, b6, a6);
	MUL32(b7, a7, b7, a7);
	MUL32(b8, a8, b8, a8);

	a0 &= (uint32_t)0x11111111;
	a1 &= (uint32_t)0x11111111;
	a2 &= (uint32_t)0x11111111;
	a3 &= (uint32_t)0x11111111;
	a4 &= (uint32_t)0x11111111;
	a5 &= (uint32_t)0x11111111;
	a6 &= (uint32_t)0x11111111;
	a7 &= (uint32_t)0x11111111;
	a8 &= (uint32_t)0x11111111;
	b0 &= (uint32_t)0x11111111;
	b1 &= (uint32_t)0x11111111;
	b2 &= (uint32_t)0x11111111;
	b3 &= (uint32_t)0x11111111;
	b4 &= (uint32_t)0x11111111;
	b5 &= (uint32_t)0x11111111;
	b6 &= (uint32_t)0x11111111;
	b7 &= (uint32_t)0x11111111;
	b8 &= (uint32_t)0x11111111;

	a2 ^= a0 ^ a1;
	b2 ^= b0 ^ b1;
	a0 ^= (a2 << 1) ^ (a1 << 2);
	b0 ^= (b2 << 1) ^ (b1 << 2);
	a5 ^= a3 ^ a4;
	b5 ^= b3 ^ b4;
	a3 ^= (a5 << 1) ^ (a4 << 2);
	b3 ^= (b5 << 1) ^ (b4 << 2);
	a8 ^= a6 ^ a7;
	b8 ^= b6 ^ b7;
	a6 ^= (a8 << 1) ^ (a7 << 2);
	b6 ^= (b8 << 1) ^ (b7 << 2);
	a6 ^= a0 ^ a3;
	b6 ^= b0 ^ b3;
	*lo = a0 ^ (a6 << 2) ^ (a3 << 4);
	*hi = b0 ^ (b6 << 2) ^ (b3 << 4) ^ (a6 >> 30) ^ (a3 >> 28);
}

#else

/*
 * Simple multiplication in GF(2)[X], using 16 integer multiplications.
 */
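
/*
 * Each operand is split into four words holding the bits whose
 * position is 0, 1, 2 or 3 modulo 4. Any product of two such words
 * has at most 8 one-bit terms per result coefficient, so carries
 * never reach the next data bit (see the spacing argument at the top
 * of this file). z0..z3 gather the partial products whose result
 * bits land at positions 0, 1, 2 and 3 modulo 4, respectively, and
 * the masks then keep only those clean bits.
 */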

static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t x0, x1, x2, x3;
	uint32_t y0, y1, y2, y3;
	uint64_t z0, z1, z2, z3;
	uint64_t z;

	x0 = x & (uint32_t)0x11111111;
	x1 = x & (uint32_t)0x22222222;
	x2 = x & (uint32_t)0x44444444;
	x3 = x & (uint32_t)0x88888888;
	y0 = y & (uint32_t)0x11111111;
	y1 = y & (uint32_t)0x22222222;
	y2 = y & (uint32_t)0x44444444;
	y3 = y & (uint32_t)0x88888888;
	z0 = MUL(x0, y0) ^ MUL(x1, y3) ^ MUL(x2, y2) ^ MUL(x3, y1);
	z1 = MUL(x0, y1) ^ MUL(x1, y0) ^ MUL(x2, y3) ^ MUL(x3, y2);
	z2 = MUL(x0, y2) ^ MUL(x1, y1) ^ MUL(x2, y0) ^ MUL(x3, y3);
	z3 = MUL(x0, y3) ^ MUL(x1, y2) ^ MUL(x2, y1) ^ MUL(x3, y0);
	z0 &= (uint64_t)0x1111111111111111;
	z1 &= (uint64_t)0x2222222222222222;
	z2 &= (uint64_t)0x4444444444444444;
	z3 &= (uint64_t)0x8888888888888888;
	z = z0 | z1 | z2 | z3;
	*lo = (uint32_t)z;
	*hi = (uint32_t)(z >> 32);
}

#endif
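
/*
 * Quick sanity check (informal): in GF(2)[X], (X + 1)^2 = X^2 + 1,
 * so either bmul() implementation maps x = 3, y = 3 to *lo == 5 and
 * *hi == 0, whereas a plain integer multiplication would yield 9.
 */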

/* see bearssl_hash.h */
void
br_ghash_ctmul(void *y, const void *h, const void *data, size_t len)
{
	const unsigned char *buf, *hb;
	unsigned char *yb;
	uint32_t yw[4];
	uint32_t hw[4];

	/*
	 * Throughout the loop we handle the y and h values as arrays
	 * of 32-bit words.
	 */
	buf = data;
	yb = y;
	hb = h;
	yw[3] = br_dec32be(yb);
	yw[2] = br_dec32be(yb + 4);
	yw[1] = br_dec32be(yb + 8);
	yw[0] = br_dec32be(yb + 12);
	hw[3] = br_dec32be(hb);
	hw[2] = br_dec32be(hb + 4);
	hw[1] = br_dec32be(hb + 8);
	hw[0] = br_dec32be(hb + 12);
	while (len > 0) {
		const unsigned char *src;
		unsigned char tmp[16];
		int i;
		uint32_t a[9], b[9], zw[8];
		uint32_t c0, c1, c2, c3, d0, d1, d2, d3, e0, e1, e2, e3;

		/*
		 * Get the next 16-byte block (using zero-padding if
		 * necessary).
		 */
		if (len >= 16) {
			src = buf;
			buf += 16;
			len -= 16;
		} else {
			memcpy(tmp, buf, len);
			memset(tmp + len, 0, (sizeof tmp) - len);
			src = tmp;
			len = 0;
		}

		/*
		 * Decode the block. The GHASH standard mandates
		 * big-endian encoding.
		 */
		yw[3] ^= br_dec32be(src);
		yw[2] ^= br_dec32be(src + 4);
		yw[1] ^= br_dec32be(src + 8);
		yw[0] ^= br_dec32be(src + 12);

		/*
		 * We multiply two 128-bit field elements. We use
		 * Karatsuba to turn that into three 64-bit
		 * multiplications, which are themselves done with a
		 * total of nine 32-bit multiplications.
		 */

		/*
		 * y[0,1]*h[0,1] -> 0..2
		 * y[2,3]*h[2,3] -> 3..5
		 * (y[0,1]+y[2,3])*(h[0,1]+h[2,3]) -> 6..8
		 */
		a[0] = yw[0];
		b[0] = hw[0];
		a[1] = yw[1];
		b[1] = hw[1];
		a[2] = a[0] ^ a[1];
		b[2] = b[0] ^ b[1];

		a[3] = yw[2];
		b[3] = hw[2];
		a[4] = yw[3];
		b[4] = hw[3];
		a[5] = a[3] ^ a[4];
		b[5] = b[3] ^ b[4];

		a[6] = a[0] ^ a[3];
		b[6] = b[0] ^ b[3];
		a[7] = a[1] ^ a[4];
		b[7] = b[1] ^ b[4];
		a[8] = a[6] ^ a[7];
		b[8] = b[6] ^ b[7];

		for (i = 0; i < 9; i ++) {
			bmul(&b[i], &a[i], b[i], a[i]);
		}

		c0 = a[0];
		c1 = b[0] ^ a[2] ^ a[0] ^ a[1];
		c2 = a[1] ^ b[2] ^ b[0] ^ b[1];
		c3 = b[1];
		d0 = a[3];
		d1 = b[3] ^ a[5] ^ a[3] ^ a[4];
		d2 = a[4] ^ b[5] ^ b[3] ^ b[4];
		d3 = b[4];
		e0 = a[6];
		e1 = b[6] ^ a[8] ^ a[6] ^ a[7];
		e2 = a[7] ^ b[8] ^ b[6] ^ b[7];
		e3 = b[7];

		e0 ^= c0 ^ d0;
		e1 ^= c1 ^ d1;
		e2 ^= c2 ^ d2;
		e3 ^= c3 ^ d3;
		c2 ^= e0;
		c3 ^= e1;
		d0 ^= e2;
		d1 ^= e3;

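		/*
		 * The 255-bit product now sits in c0..c3 (low 128
		 * bits) and d0..d3 (high bits), the middle Karatsuba
		 * term having been folded in above.
		 */
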
		/*
		 * The GHASH specification has the bits "reversed" (the
		 * most significant bit is in fact the least
		 * significant), which does not matter for a carryless
		 * multiplication, except that the 255-bit result must
		 * be shifted by 1 bit.
		 */
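		/*
		 * (If rev128() denotes bit reversal over 128 bits,
		 * then rev128(a)*rev128(b) is the 255-bit reversal of
		 * a*b; within our 256-bit container zw[], a 1-bit left
		 * shift realigns the words with that convention.)
		 */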
		zw[0] = c0 << 1;
		zw[1] = (c1 << 1) | (c0 >> 31);
		zw[2] = (c2 << 1) | (c1 >> 31);
		zw[3] = (c3 << 1) | (c2 >> 31);
		zw[4] = (d0 << 1) | (c3 >> 31);
		zw[5] = (d1 << 1) | (d0 >> 31);
		zw[6] = (d2 << 1) | (d1 >> 31);
		zw[7] = (d3 << 1) | (d2 >> 31);

		/*
		 * We now do the reduction modulo the field polynomial
		 * to get back to 128 bits.
		 */
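		/*
		 * The field polynomial is X^128 + X^7 + X^2 + X + 1;
		 * in the reversed bit order used here, folding the
		 * upper 128 bits back translates into the shifts by
		 * 1, 2 and 7 below (and their 32-bit word complements
		 * 31, 30 and 25).
		 */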
		for (i = 0; i < 4; i ++) {
			uint32_t lw;

			lw = zw[i];
			zw[i + 4] ^= lw ^ (lw >> 1) ^ (lw >> 2) ^ (lw >> 7);
			zw[i + 3] ^= (lw << 31) ^ (lw << 30) ^ (lw << 25);
		}
		memcpy(yw, zw + 4, sizeof yw);
	}

	/*
	 * Encode back the result.
	 */
	br_enc32be(yb, yw[3]);
	br_enc32be(yb + 4, yw[2]);
	br_enc32be(yb + 8, yw[1]);
	br_enc32be(yb + 12, yw[0]);
}