/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif	/* _KERNEL */


#include <sys/types.h>
#include <sys/kmem.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>

#ifdef __amd64
#include <sys/x86_archext.h>	/* x86_feature, X86_*, CPUID_* */

#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()

#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif	/* __amd64 */

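/*
 * Scratch type used by the software GHASH multiply below: one 128-bit
 * GCM block held as two 64-bit halves (a = most significant half,
 * b = least significant half, both loaded big-endian via ntohll()).
 */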
struct aes_block {
	uint64_t a;
	uint64_t b;
};


/*
 * gcm_mul()
 * Perform a carry-less multiplication (that is, use XOR instead of the
 * multiply operator) on *x_in and *y and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
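		/*
		 * Software fallback: bit-by-bit shift-and-XOR multiply
		 * in GF(2^128) as described in the GCM specification.
		 * R is the reduction constant for the field polynomial
		 * x^128 + x^7 + x^2 + x + 1 (0xE1 in the top byte of
		 * GCM's bit-reflected representation).
		 */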
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}


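/*
 * GHASH(c, d, t): XOR one block of data (d) into the running hash in
 * c->gcm_ghash, then multiply by the hash subkey c->gcm_H in GF(2^128),
 * storing the product in t.  Note that the macro expands to two
 * statements, so callers must not use it as the unbraced body of an
 * if/else or loop.
 */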
#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(c)->gcm_ghash, (c)->gcm_H, (uint64_t *)(t));

/*
 * Encrypt multiple blocks of data in GCM mode.  Decryption for GCM mode
 * is handled in a separate function.
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
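	/*
	 * counter_mask selects the final 32 bits of the counter block,
	 * the only part GCM increments.  It is passed through ntohll()
	 * so the mask lines up with gcm_cb[1], which is kept in
	 * big-endian byte order.
	 */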
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

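	/*
	 * Finish the GHASH computation over the block containing
	 * len(AAD) || len(C), both in bits.  gcm_len_a_len_c[0] (the AAD
	 * length) was filled in at init time; only the ciphertext length
	 * is added here.
	 */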
	ctx->gcm_len_a_len_c[1] = htonll(ctx->gcm_processed_data_len << 3);
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);
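	/*
	 * ghash now holds the authentication tag,
	 * T = GHASH(H, A, C) XOR E(K, J0); it is written out below,
	 * after any remaining partial ciphertext block.
	 */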

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Decrypt the last, incomplete block of the input, i.e. the portion
 * whose length is not a multiple of the block size.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* zero-pad the final partial ciphertext block before hashing it */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}

/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to the plaintext buffer.
	 * Decryption is deferred to gcm_decrypt_final(), where the last
	 * gcm_tag_len bytes of the accumulated input are known to be the
	 * tag and can be verified before any plaintext is released.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
		new = kmem_alloc(new_len, ctx->gcm_kmflag);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
		new = malloc(new_len);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		free(ctx->gcm_pt_buf);
#endif

		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}

int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

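	/*
	 * gcm_pt_buf holds the accumulated ciphertext followed by the
	 * gcm_tag_len-byte authentication tag.  The ciphertext is
	 * decrypted in place and only released if the computed tag
	 * matches the one supplied.
	 */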
	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;

		/* Incomplete last block */
		if (remainder > 0 && remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * Not expecting any more ciphertext; just
			 * compute the plaintext for the remaining input.
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(pt_len << 3);
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}

static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}

static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
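	/*
	 * Per the GCM spec, a 96-bit IV is used directly:
	 * J0 = IV || 0^31 || 1.  Any other IV length is hashed instead:
	 * J0 = GHASH(IV padded to a block multiple || 0^64 || len(IV)).
	 */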
	if (iv_len == 12) {
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(iv_len << 3);
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}

/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * Not a full block of data; pad the rest of
			 * the buffer with zeros.
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}

int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0] = htonll(gcm_param->ulAADLen << 3);

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

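/*
 * Illustrative sketch (not part of this file's API contract): a caller
 * such as the AES provider typically drives GCM encryption as shown
 * below.  The key schedule and the block callbacks are assumptions made
 * for the example; they are supplied by the caller, not defined here.
 *
 *	CK_AES_GCM_PARAMS params;
 *	gcm_ctx_t *ctx = gcm_alloc_ctx(KM_SLEEP);
 *
 *	params.pIv = iv;		params.ulIvLen = 12;
 *	params.pAAD = aad;		params.ulAADLen = aad_len;
 *	params.ulTagBits = 128;
 *	ctx->gcm_keysched = keysched;	(caller's AES key schedule)
 *
 *	rv = gcm_init_ctx(ctx, (char *)&params, block_size,
 *	    encrypt_block, copy_block, xor_block);
 *	rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, len, out,
 *	    block_size, encrypt_block, copy_block, xor_block);
 *	rv = gcm_encrypt_final(ctx, out, block_size, encrypt_block,
 *	    copy_block, xor_block);
 */

/*
 * GMAC is GCM with no plaintext: all input is supplied as AAD at init
 * time and only the authentication tag is produced.  The IV length is
 * fixed at AES_GMAC_IV_LEN bytes and the tag at AES_GMAC_TAG_BITS bits.
 */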
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0] = htonll(gmac_param->ulAADLen << 3);

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}


#ifdef __amd64
/*
 * Return 1 if executing on a processor with the PCLMULQDQ instruction,
 * otherwise 0 (i.e., Intel without PCLMULQDQ, or a non-Intel processor).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * the global variable x86_feature or the output of cpuid_insn().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int	cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
#ifdef X86_PCLMULQDQ
		cached_result = (x86_feature & X86_PCLMULQDQ) != 0;
#else
		if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
			struct cpuid_regs	cpr;
			cpu_t			*cp = CPU;

			cpr.cp_eax = 1; /* Function 1: get processor info */
			(void) cpuid_insn(cp, &cpr);
			cached_result = ((cpr.cp_ecx &
			    CPUID_INTC_ECX_PCLMULQDQ) != 0);
		} else {
			cached_result = 0;
		}
#endif	/* X86_PCLMULQDQ */
#else
		uint_t		ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}

	return (cached_result);
}
#endif	/* __amd64 */
765