/*-
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
#include <sys/endian.h>
#include <sys/pcpu.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#include <machine/pcb.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h> /* for hmac_ipad_buffer and hmac_opad_buffer */
#include <opencrypto/xform.h>

#include <crypto/via/padlock.h>

/*
 * Implementation notes.
 *
 * Some VIA CPUs provide SHA1 and SHA256 acceleration.
 * We implement all HMAC algorithms provided by the crypto(9) framework, but
 * we do the work in software unless the request is HMAC/SHA1 or HMAC/SHA256
 * and our CPU can accelerate it.
 *
 * The additional CPU instructions that perform SHA1 and SHA256 are one-shot
 * operations - there is only one chance to supply the data; the CPU itself
 * adds the padding and computes the hash automatically.
 * This means it is not possible to implement the usual init(), update() and
 * final() methods directly in hardware.
 * The approach chosen here is to keep appending data to a buffer on update()
 * (reallocating the buffer as necessary) and to issue the XSHA{1,256}
 * instruction on final().
 */
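
/*
 * Illustrative sketch (not part of the driver logic): with the scheme
 * described above, successive Update() calls simply concatenate their
 * data into psc_buf, so
 *
 *	padlock_sha_init(&ctx);
 *	padlock_sha_update(&ctx, data, 3);	 (data points at "abcdef")
 *	padlock_sha_update(&ctx, data + 3, 3);
 *	padlock_sha1_final(digest, &ctx);
 *
 * yields the same digest as a single one-shot SHA1 of "abcdef"; the
 * REP XSHA1 instruction performs the padding and finalization itself.
 */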

struct padlock_sha_ctx {
	uint8_t	*psc_buf;
	int	 psc_offset;
	int	 psc_size;
};
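
/*
 * padlock_authcompute() keeps a scratch copy of the active context in a
 * stack-allocated union authctx, so the PadLock context must never be
 * larger than that union.
 */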
CTASSERT(sizeof(struct padlock_sha_ctx) <= sizeof(union authctx));

static void padlock_sha_init(struct padlock_sha_ctx *ctx);
static int padlock_sha_update(struct padlock_sha_ctx *ctx, const uint8_t *buf,
    uint16_t bufsize);
static void padlock_sha1_final(uint8_t *hash, struct padlock_sha_ctx *ctx);
static void padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx);

static struct auth_hash padlock_hmac_sha1 = {
	.type = CRYPTO_SHA1_HMAC,
	.name = "HMAC-SHA1",
	.keysize = SHA1_BLOCK_LEN,
	.hashsize = SHA1_HASH_LEN,
	.ctxsize = sizeof(struct padlock_sha_ctx),
	.blocksize = SHA1_BLOCK_LEN,
	.Init = (void (*)(void *))padlock_sha_init,
	.Update = (int (*)(void *, const uint8_t *, uint16_t))padlock_sha_update,
	.Final = (void (*)(uint8_t *, void *))padlock_sha1_final,
};

static struct auth_hash padlock_hmac_sha256 = {
	.type = CRYPTO_SHA2_256_HMAC,
	.name = "HMAC-SHA2-256",
	.keysize = SHA2_256_BLOCK_LEN,
	.hashsize = SHA2_256_HASH_LEN,
	.ctxsize = sizeof(struct padlock_sha_ctx),
	.blocksize = SHA2_256_BLOCK_LEN,
	.Init = (void (*)(void *))padlock_sha_init,
	.Update = (int (*)(void *, const uint8_t *, uint16_t))padlock_sha_update,
	.Final = (void (*)(uint8_t *, void *))padlock_sha256_final,
};

MALLOC_DECLARE(M_PADLOCK);

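/*
 * Copy the 32-bit state words produced by XSHA into the caller's digest
 * buffer, byte-swapping each word so that the digest ends up in the
 * conventional big-endian byte order.
 */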
static __inline void
padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
{

	while (count-- > 0)
		*dst++ = bswap32(*src++);
}

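/*
 * One-shot SHA1/SHA256 helpers.  The standard initial hash values are
 * loaded into an aligned scratch buffer (PADLOCK_ALIGN() provides the
 * alignment the engine expects), and "rep xsha1"/"rep xsha256" is issued
 * with %esi pointing at the input, %edi at the state buffer and %ecx
 * holding the byte count; the CPU pads the message and produces the
 * final state in a single operation.
 */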
static void
padlock_do_sha1(const u_char *in, u_char *out, int count)
{
	u_char buf[128+16];	/* PadLock needs at least a 128-byte buffer. */
	u_char *result = PADLOCK_ALIGN(buf);

	((uint32_t *)result)[0] = 0x67452301;
	((uint32_t *)result)[1] = 0xEFCDAB89;
	((uint32_t *)result)[2] = 0x98BADCFE;
	((uint32_t *)result)[3] = 0x10325476;
	((uint32_t *)result)[4] = 0xC3D2E1F0;

#ifdef __GNUCLIKE_ASM
	__asm __volatile(
		".byte  0xf3, 0x0f, 0xa6, 0xc8" /* rep xsha1 */
			: "+S"(in), "+D"(result)
			: "c"(count), "a"(0)
		);
#endif

	padlock_output_block((uint32_t *)result, (uint32_t *)out,
	    SHA1_HASH_LEN / sizeof(uint32_t));
}

static void
padlock_do_sha256(const char *in, char *out, int count)
{
	char buf[128+16];	/* PadLock needs at least a 128-byte buffer. */
	char *result = PADLOCK_ALIGN(buf);

	((uint32_t *)result)[0] = 0x6A09E667;
	((uint32_t *)result)[1] = 0xBB67AE85;
	((uint32_t *)result)[2] = 0x3C6EF372;
	((uint32_t *)result)[3] = 0xA54FF53A;
	((uint32_t *)result)[4] = 0x510E527F;
	((uint32_t *)result)[5] = 0x9B05688C;
	((uint32_t *)result)[6] = 0x1F83D9AB;
	((uint32_t *)result)[7] = 0x5BE0CD19;

#ifdef __GNUCLIKE_ASM
	__asm __volatile(
		".byte  0xf3, 0x0f, 0xa6, 0xd0" /* rep xsha256 */
			: "+S"(in), "+D"(result)
			: "c"(count), "a"(0)
		);
#endif

	padlock_output_block((uint32_t *)result, (uint32_t *)out,
	    SHA2_256_HASH_LEN / sizeof(uint32_t));
}

static void
padlock_sha_init(struct padlock_sha_ctx *ctx)
{

	ctx->psc_buf = NULL;
	ctx->psc_offset = 0;
	ctx->psc_size = 0;
}

static int
padlock_sha_update(struct padlock_sha_ctx *ctx, const uint8_t *buf, uint16_t bufsize)
{
	uint8_t *nbuf;
	int nsize;

	if (ctx->psc_size - ctx->psc_offset < bufsize) {
		nsize = MAX(ctx->psc_size * 2, ctx->psc_size + bufsize);
		/*
		 * realloc(9) leaves the original buffer intact on failure,
		 * so only commit the new pointer and size once the
		 * allocation has succeeded; otherwise the old buffer would
		 * be leaked.
		 */
		nbuf = realloc(ctx->psc_buf, nsize, M_PADLOCK, M_NOWAIT);
		if (nbuf == NULL)
			return (ENOMEM);
		ctx->psc_buf = nbuf;
		ctx->psc_size = nsize;
	}
	bcopy(buf, ctx->psc_buf + ctx->psc_offset, bufsize);
	ctx->psc_offset += bufsize;
	return (0);
}

static void
padlock_sha_free(struct padlock_sha_ctx *ctx)
{

	if (ctx->psc_buf != NULL) {
		//bzero(ctx->psc_buf, ctx->psc_size);
		free(ctx->psc_buf, M_PADLOCK);
		ctx->psc_buf = NULL;
		ctx->psc_offset = 0;
		ctx->psc_size = 0;
	}
}

static void
padlock_sha1_final(uint8_t *hash, struct padlock_sha_ctx *ctx)
{

	padlock_do_sha1(ctx->psc_buf, hash, ctx->psc_offset);
	padlock_sha_free(ctx);
}

static void
padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx)
{

	padlock_do_sha256(ctx->psc_buf, hash, ctx->psc_offset);
	padlock_sha_free(ctx);
}

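/*
 * Duplicate a hash context.  For the PadLock SHA contexts a plain bcopy()
 * is not enough: psc_buf is a pointer, so the copy gets its own buffer and
 * the two contexts can be used and freed independently.
 */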
static void
padlock_copy_ctx(struct auth_hash *axf, void *sctx, void *dctx)
{

	if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
	    (axf->type == CRYPTO_SHA1_HMAC ||
	     axf->type == CRYPTO_SHA2_256_HMAC)) {
		struct padlock_sha_ctx *spctx = sctx, *dpctx = dctx;

		dpctx->psc_offset = spctx->psc_offset;
		dpctx->psc_size = spctx->psc_size;
		dpctx->psc_buf = malloc(dpctx->psc_size, M_PADLOCK, M_WAITOK);
		bcopy(spctx->psc_buf, dpctx->psc_buf, dpctx->psc_size);
	} else {
		bcopy(sctx, dctx, axf->ctxsize);
	}
}

static void
padlock_free_ctx(struct auth_hash *axf, void *ctx)
{

	if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
	    (axf->type == CRYPTO_SHA1_HMAC ||
	     axf->type == CRYPTO_SHA2_256_HMAC)) {
		padlock_sha_free(ctx);
	}
}

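/*
 * Precompute the HMAC inner and outer contexts: ses_ictx is initialized
 * with (key XOR ipad) and ses_octx with (key XOR opad), each padded with
 * the ipad/opad constants up to the hash block size.  The key length is
 * passed in bits and converted to bytes on entry; the key itself is XORed
 * in place and restored to its original value before returning.
 */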
static void
padlock_hash_key_setup(struct padlock_session *ses, caddr_t key, int klen)
{
	struct auth_hash *axf;
	int i;

	klen /= 8;
	axf = ses->ses_axf;

	/*
	 * Try to free contexts before using them, because
	 * padlock_hash_key_setup() can be called twice - once from
	 * padlock_newsession() and again from padlock_process().
	 */
	padlock_free_ctx(axf, ses->ses_ictx);
	padlock_free_ctx(axf, ses->ses_octx);

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	axf->Init(ses->ses_ictx);
	axf->Update(ses->ses_ictx, key, klen);
	axf->Update(ses->ses_ictx, hmac_ipad_buffer, axf->blocksize - klen);

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	axf->Init(ses->ses_octx);
	axf->Update(ses->ses_octx, key, klen);
	axf->Update(ses->ses_octx, hmac_opad_buffer, axf->blocksize - klen);

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;
}

/*
 * Compute the keyed-hash authenticator: hash the payload with the
 * precomputed inner context, then hash the resulting digest with the
 * precomputed outer context, and copy the (possibly truncated) result
 * back into the request.
 */
static int
padlock_authcompute(struct padlock_session *ses, struct cryptodesc *crd,
    caddr_t buf, int flags)
{
	u_char hash[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int error;

	axf = ses->ses_axf;

	padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
	error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (error != 0) {
		padlock_free_ctx(axf, &ctx);
		return (error);
	}
	axf->Final(hash, &ctx);

	padlock_copy_ctx(axf, ses->ses_octx, &ctx);
	axf->Update(&ctx, hash, axf->hashsize);
	axf->Final(hash, &ctx);

	/* Inject the authentication data. */
	crypto_copyback(flags, buf, crd->crd_inject,
	    ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
	return (0);
}

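/*
 * The entry points below are expected to be driven by the PadLock driver
 * roughly as in the following sketch (illustrative only, not a literal
 * excerpt from padlock.c):
 *
 *	error = padlock_hash_setup(ses, macini);	 (session creation)
 *	...
 *	error = padlock_hash_process(ses, maccrd, crp);	 (per request)
 *	...
 *	padlock_hash_free(ses);				 (session teardown)
 */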
int
padlock_hash_setup(struct padlock_session *ses, struct cryptoini *macini)
{

	ses->ses_mlen = macini->cri_mlen;

	/* Find the software structure which describes the HMAC algorithm. */
	switch (macini->cri_alg) {
	case CRYPTO_NULL_HMAC:
		ses->ses_axf = &auth_hash_null;
		break;
	case CRYPTO_MD5_HMAC:
		ses->ses_axf = &auth_hash_hmac_md5;
		break;
	case CRYPTO_SHA1_HMAC:
		if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
			ses->ses_axf = &padlock_hmac_sha1;
		else
			ses->ses_axf = &auth_hash_hmac_sha1;
		break;
	case CRYPTO_RIPEMD160_HMAC:
		ses->ses_axf = &auth_hash_hmac_ripemd_160;
		break;
	case CRYPTO_SHA2_256_HMAC:
		if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
			ses->ses_axf = &padlock_hmac_sha256;
		else
			ses->ses_axf = &auth_hash_hmac_sha2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		ses->ses_axf = &auth_hash_hmac_sha2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		ses->ses_axf = &auth_hash_hmac_sha2_512;
		break;
	}

	/* Allocate memory for the HMAC inner and outer contexts. */
	ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
	    M_ZERO | M_NOWAIT);
	ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
	    M_ZERO | M_NOWAIT);
	if (ses->ses_ictx == NULL || ses->ses_octx == NULL)
		return (ENOMEM);

	/* Set up the key if one was given. */
	if (macini->cri_key != NULL) {
		padlock_hash_key_setup(ses, macini->cri_key,
		    macini->cri_klen);
	}
	return (0);
}

int
padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
    struct cryptop *crp)
{
	struct thread *td;
	int error;

	td = curthread;
	fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
	if ((maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
		padlock_hash_key_setup(ses, maccrd->crd_key, maccrd->crd_klen);

	error = padlock_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
	fpu_kern_leave(td, ses->ses_fpu_ctx);
	return (error);
}

void
padlock_hash_free(struct padlock_session *ses)
{

	if (ses->ses_ictx != NULL) {
		padlock_free_ctx(ses->ses_axf, ses->ses_ictx);
		bzero(ses->ses_ictx, ses->ses_axf->ctxsize);
		free(ses->ses_ictx, M_PADLOCK);
		ses->ses_ictx = NULL;
	}
	if (ses->ses_octx != NULL) {
		padlock_free_ctx(ses->ses_axf, ses->ses_octx);
		bzero(ses->ses_octx, ses->ses_axf->ctxsize);
		free(ses->ses_octx, M_PADLOCK);
		ses->ses_octx = NULL;
	}
}