/*-
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * This software was developed by John-Mark Gurney under
 * the sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD$
 *
 */

#include "gfmult.h"

#define REV_POLY_REDUCT	0xe1	/* 0x87 bit reversed */

/* reverse the bits of a nibble */
static const uint8_t nib_rev[] = {
	0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
	0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf,
};

/* calculate v * 2 */
static inline struct gf128
gf128_mulalpha(struct gf128 v)
{
	uint64_t mask;

	mask = !!(v.v[1] & 1);
	mask = ~(mask - 1);
	v.v[1] = (v.v[1] >> 1) | ((v.v[0] & 1) << 63);
	v.v[0] = (v.v[0] >> 1) ^ ((mask & REV_POLY_REDUCT) << 56);

	return v;
}

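/*
 * Note on gf128_mulalpha(): GHASH uses a bit-reversed representation of
 * GF(2^128), so multiplying by x ("2" above) is a right shift of the
 * 128-bit value by one.  The two mask statements turn the bit shifted out
 * of v[1] into an all-ones or all-zeros mask without branching, so
 * REV_POLY_REDUCT (0xe1, the bit-reversed low byte 0x87 of the polynomial
 * x^128 + x^7 + x^2 + x + 1) is XORed into the top byte of v[0] only when
 * that bit was set.
 */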
/*
 * Generate a table of n * h for n = 0 through 15.  Store the results in the
 * table w/ indexes bit reversed, and the words striped across the values.
 */
void
gf128_genmultable(struct gf128 h, struct gf128table *t)
{
	struct gf128 tbl[16];
	int i;

	tbl[0] = MAKE_GF128(0, 0);
	tbl[1] = h;

	for (i = 2; i < 16; i += 2) {
		tbl[i] = gf128_mulalpha(tbl[i / 2]);
		tbl[i + 1] = gf128_add(tbl[i], h);
	}

	for (i = 0; i < 16; i++) {
		t->a[nib_rev[i]] = tbl[i].v[0] >> 32;
		t->b[nib_rev[i]] = tbl[i].v[0];
		t->c[nib_rev[i]] = tbl[i].v[1] >> 32;
		t->d[nib_rev[i]] = tbl[i].v[1];
	}
}

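/*
 * Layout note: gf128_genmultable() stripes the four 32-bit words of each
 * product n*h across the a/b/c/d arrays (a/b hold v[0], c/d hold v[1]) and
 * stores them at index nib_rev[n].  The nibbles later pulled out of the
 * (bit-reversed) data words can therefore be used as table indexes
 * directly, without reversing them first.
 */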
/*
 * Generate tables containing h, h^2, h^3 and h^4, stored in t->tbls[0]
 * through t->tbls[3] respectively.
 */
void
gf128_genmultable4(struct gf128 h, struct gf128table4 *t)
{
	struct gf128 h2, h3, h4;

	gf128_genmultable(h, &t->tbls[0]);

	h2 = gf128_mul(h, &t->tbls[0]);

	gf128_genmultable(h2, &t->tbls[1]);

	h3 = gf128_mul(h, &t->tbls[1]);
	gf128_genmultable(h3, &t->tbls[2]);

	h4 = gf128_mul(h2, &t->tbls[1]);
	gf128_genmultable(h4, &t->tbls[3]);
}

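/*
 * The powers are built from the tables generated so far: h^2 = h*h via
 * tbls[0], h^3 = h*h^2 and h^4 = h^2*h^2 via tbls[1].  gfmultword4() below
 * walks tbls[3] down to tbls[0] so that four consecutive blocks are
 * weighted by h^4, h^3, h^2 and h in a single pass.
 */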
/*
 * Read a row from the table.
 */
static inline struct gf128
readrow(struct gf128table *tbl, unsigned bits)
{
	struct gf128 r;

	bits = bits % 16;

	r.v[0] = ((uint64_t)tbl->a[bits] << 32) | tbl->b[bits];
	r.v[1] = ((uint64_t)tbl->c[bits] << 32) | tbl->d[bits];

	return r;
}

/*
 * These are the reduction values.  Since we are dealing with the bit
 * reversed representation, the values need to be bit reversed, AND the
 * indexes are also bit reversed to make lookups quicker.
 */
static uint16_t reduction[] = {
	0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
	0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
};

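/*
 * Each entry undoes a 4-bit right shift: reduction[n] is the XOR of
 * REV_POLY_REDUCT (0xe1) shifted left by 5 + i for every bit i set in n,
 * which is what four chained gf128_mulalpha() reductions would have
 * accumulated.  For example reduction[1] = 0xe1 << 5 = 0x1c20,
 * reduction[8] = 0xe1 << 8 = 0xe100, and reduction[9] = 0x1c20 ^ 0xe100 =
 * 0xfd20.
 */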
/*
 * Calculate:
 * (...((x*2^4 + word[3,0]*h) *
 *         2^4 + word[7,4]*h) *
 *  ...) *
 *         2^4 + word[63,60]*h
 *
 * i.e. Horner evaluation over the 16 nibbles of word, starting with the
 * low nibble: each iteration multiplies the accumulator by 2^4 and adds
 * in nibble * h from the table.
 */
static struct gf128
gfmultword(uint64_t word, struct gf128 x, struct gf128table *tbl)
{
	struct gf128 row;
	unsigned bits;
	unsigned redbits;
	int i;

	for (i = 0; i < 64; i += 4) {
		bits = word % 16;

		/* fetch row */
		row = readrow(tbl, bits);

		/* x * 2^4 */
		redbits = x.v[1] % 16;
		x.v[1] = (x.v[1] >> 4) | (x.v[0] % 16) << 60;
		x.v[0] >>= 4;
		x.v[0] ^= (uint64_t)reduction[redbits] << (64 - 16);

		word >>= 4;

		x = gf128_add(x, row);
	}

	return x;
}

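/*
 * The "x * 2^4" step in gfmultword() is four gf128_mulalpha() doublings
 * collapsed into one shift and a single reduction[] lookup, so a 64-bit
 * word is consumed in 16 iterations of 4 bits each.
 */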
/*
 * Calculate
 * (...(x*2^4 + worda[3,0]*h^4 + wordb[3,0]*h^3 + ... + wordd[3,0]*h) *
 *  ...) *
 *        2^4 + worda[63,60]*h^4 + ... + wordd[63,60]*h
 *
 * i.e. the same Horner evaluation as gfmultword(), but each iteration adds
 * in one nibble from each of the four words, weighted by h^4, h^3, h^2 and
 * h respectively.
 *
 * Passing/returning the struct is 0.5% faster than passing it in via
 * pointer on amd64.
 */
static struct gf128
gfmultword4(uint64_t worda, uint64_t wordb, uint64_t wordc, uint64_t wordd,
    struct gf128 x, struct gf128table4 *tbl)
{
	struct gf128 rowa, rowb, rowc, rowd;
	unsigned bitsa, bitsb, bitsc, bitsd;
	unsigned redbits;
	int i;

	/*
	 * XXX - nibble reverse words to save a shift? probably not as
	 * nibble reverse would take 20 ops (5 * 4) versus 16
	 */

	for (i = 0; i < 64; i += 4) {
		bitsa = worda % 16;
		bitsb = wordb % 16;
		bitsc = wordc % 16;
		bitsd = wordd % 16;

		/* fetch rows */
		rowa = readrow(&tbl->tbls[3], bitsa);
		rowb = readrow(&tbl->tbls[2], bitsb);
		rowc = readrow(&tbl->tbls[1], bitsc);
		rowd = readrow(&tbl->tbls[0], bitsd);

		/* x * 2^4 */
		redbits = x.v[1] % 16;
		x.v[1] = (x.v[1] >> 4) | (x.v[0] % 16) << 60;
		x.v[0] >>= 4;
		x.v[0] ^= (uint64_t)reduction[redbits] << (64 - 16);

		worda >>= 4;
		wordb >>= 4;
		wordc >>= 4;
		wordd >>= 4;

		x = gf128_add(x, gf128_add(rowa, gf128_add(rowb,
		    gf128_add(rowc, rowd))));
	}

	return x;
}

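/*
 * Handling four words per iteration amortizes the shift-and-reduce of the
 * accumulator: one "x * 2^4" step serves all four table lookups, which is
 * why gf128table4 keeps separate tables for h through h^4.
 */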
struct gf128
gf128_mul(struct gf128 v, struct gf128table *tbl)
{
	struct gf128 ret;

	ret = MAKE_GF128(0, 0);

	ret = gfmultword(v.v[1], ret, tbl);
	ret = gfmultword(v.v[0], ret, tbl);

	return ret;
}

/*
 * Calculate a*h^4 + b*h^3 + c*h^2 + d*h, or:
 * (((a*h+b)*h+c)*h+d)*h
 */
struct gf128
gf128_mul4(struct gf128 a, struct gf128 b, struct gf128 c, struct gf128 d,
    struct gf128table4 *tbl)
{
	struct gf128 tmp;

	tmp = MAKE_GF128(0, 0);

	tmp = gfmultword4(a.v[1], b.v[1], c.v[1], d.v[1], tmp, tbl);
	tmp = gfmultword4(a.v[0], b.v[0], c.v[0], d.v[0], tmp, tbl);

	return tmp;
}

/*
 * a = data[0..15] + r
 * b = data[16..31]
 * c = data[32..47]
 * d = data[48..63]
 *
 * Calculate a*h^4 + b*h^3 + c*h^2 + d*h, or:
 * (((a*h+b)*h+c)*h+d)*h
 */
struct gf128
gf128_mul4b(struct gf128 r, const uint8_t *v, struct gf128table4 *tbl)
{
	struct gf128 a, b, c, d;
	struct gf128 tmp;

	tmp = MAKE_GF128(0, 0);

	a = gf128_add(r, gf128_read(&v[0*16]));
	b = gf128_read(&v[1*16]);
	c = gf128_read(&v[2*16]);
	d = gf128_read(&v[3*16]);

	tmp = gfmultword4(a.v[1], b.v[1], c.v[1], d.v[1], tmp, tbl);
	tmp = gfmultword4(a.v[0], b.v[0], c.v[0], d.v[0], tmp, tbl);

	return tmp;
}
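
/*
 * Illustrative only (not part of this file): a GHASH-style caller might
 * fold full 64-byte chunks with gf128_mul4b() and any remaining 16-byte
 * blocks with gf128_add()/gf128_mul(), along these lines (h, data and len
 * are hypothetical):
 *
 *	struct gf128table4 t4;
 *	struct gf128 x = MAKE_GF128(0, 0);
 *
 *	gf128_genmultable4(h, &t4);
 *	for (; len >= 64; data += 64, len -= 64)
 *		x = gf128_mul4b(x, data, &t4);
 *	for (; len >= 16; data += 16, len -= 16)
 *		x = gf128_mul(gf128_add(x, gf128_read(data)), &t4.tbls[0]);
 */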
275