/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif	/* _KERNEL */


#include <sys/types.h>
#include <sys/kmem.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>

#ifdef __amd64

#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_feature, X86_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()

#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif	/* __amd64 */

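/* One 128-bit block, stored as two 64-bit words (a = first half, b = second) */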
struct aes_block {
	uint64_t a;
	uint64_t b;
};


/*
 * gcm_mul()
 * Perform a carry-less multiplication of *x_in and *y (partial products
 * are combined with XOR instead of addition, i.e. multiplication in the
 * GF(2^128) field used by GHASH) and place the result in *res.
 *
 * The input (*x_in and *y) and the output (*res) are byte swapped.
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
		/* GHASH reduction constant (NIST SP 800-38D) */
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}


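/*
 * GHASH(c, d, t)
 * XOR one block of data (d) into the running hash in c->gcm_ghash,
 * multiply the result by the hash subkey H, and store the product in t.
 */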
#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	(uint64_t *)(void *)(t));


/*
 * Encrypt multiple blocks of data in GCM mode.  Decryption for GCM mode
 * is handled in a separate function.
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}

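/*
 * Finish a GCM encryption: encrypt any buffered partial block, fold the
 * total ciphertext length into GHASH, compute the authentication tag as
 * E(K, J0) XOR GHASH, and append the partial block and the tag to the
 * output.
 */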
/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Decrypt only the last block of the input, for the case where the input
 * length is not a multiple of the block size.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* zero-pad the partial ciphertext block before hashing it */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}

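/*
 * Accumulate contiguous ciphertext in gcm_pt_buf.  No decryption is done
 * here; the buffered ciphertext is decrypted and the tag verified in
 * gcm_decrypt_final().
 */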
/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
		new = kmem_alloc(new_len, ctx->gcm_kmflag);
#else
		new = malloc(new_len);
#endif
		/* check for allocation failure before touching the old buffer */
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);

		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
#ifdef _KERNEL
		kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
		free(ctx->gcm_pt_buf);
#endif
		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}

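/*
 * Decrypt the ciphertext accumulated in gcm_pt_buf, verify the
 * authentication tag (the last gcm_tag_len bytes of the buffer), and,
 * if the tag matches, copy the plaintext to 'out'.
 */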
int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	/* not enough input to even hold the tag */
	if (ctx->gcm_processed_data_len < ctx->gcm_tag_len)
		return (CRYPTO_INVALID_MAC);

	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;

		/* Incomplete last block */
		if (remainder > 0 && remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * not expecting any more ciphertext, just
			 * compute plaintext for the remaining input
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}

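/*
 * Validate the CK_AES_GCM_PARAMS mechanism parameter: the tag length in
 * bits must be one of the values accepted below, and the IV must not be
 * empty.
 */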
static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}

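/*
 * Build the pre-counter block J0 and the initial counter block from the
 * IV (NIST SP 800-38D): a 96-bit IV becomes IV || 0^31 || 1; any other
 * length is zero-padded and run through GHASH together with its bit length.
 */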
static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}

/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}

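/*
 * Initialize a GCM context from a CK_AES_GCM_PARAMS mechanism parameter:
 * validate the arguments, record the tag and AAD lengths, and run
 * gcm_init() over the IV and the AAD.
 */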
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

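/*
 * Initialize a GMAC context from a CK_AES_GMAC_PARAMS mechanism parameter.
 * GMAC is GCM with no data to encrypt; the IV length and tag length are
 * fixed (AES_GMAC_IV_LEN and AES_GMAC_TAG_BITS).
 */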
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}


#ifdef __amd64
/*
 * Return 1 if executing on Intel with the PCLMULQDQ instruction,
 * otherwise return 0 (e.g., Intel without PCLMULQDQ, or an AMD processor).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * the global variable x86_feature or the output of cpuid_insn().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int	cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
#ifdef X86_PCLMULQDQ
		cached_result = (x86_feature & X86_PCLMULQDQ) != 0;
#else
		if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
			struct cpuid_regs	cpr;
			cpu_t			*cp = CPU;

			cpr.cp_eax = 1; /* Function 1: get processor info */
			(void) cpuid_insn(cp, &cpr);
			cached_result = ((cpr.cp_ecx &
			    CPUID_INTC_ECX_PCLMULQDQ) != 0);
		} else {
			cached_result = 0;
		}
#endif	/* X86_PCLMULQDQ */
#else
		uint_t		ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}

	return (cached_result);
}
#endif	/* __amd64 */