/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif	/* _KERNEL */

#include <sys/types.h>
#include <sys/kmem.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>

#ifdef __amd64

#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_feature, X86_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable() */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()

#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif	/* __amd64 */

struct aes_block {
	uint64_t a;
	uint64_t b;
};

/*
 * gcm_mul()
 *	Perform a carry-less multiplication (that is, use XOR instead of the
 *	multiply operator) on *x_in and *y and place the result in *res.
 *
 *	Byte swap the input (*x_in and *y) and the output (*res).
 *
 *	Note: x_in, y, and res all point to 16-byte numbers (an array of two
 *	64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}

#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	(uint64_t *)(void *)(t));

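/*
 * GHASH(c, d, t) computes one step of the GHASH chain from NIST SP 800-38D:
 * the block pointed to by d is XORed into the running hash c->gcm_ghash,
 * which is then multiplied by the hash subkey c->gcm_H in GF(2^128), with
 * the product written to t (t is normally c->gcm_ghash itself).  The
 * constant R in gcm_mul() is the reduction term for GCM's field polynomial
 * x^128 + x^7 + x^2 + x + 1, expressed in GCM's bit-reflected ordering.
 *
 * Note that GHASH is a multi-statement macro; the callers below invoke it
 * only as a full statement, never as the body of an unbraced conditional.
 */
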
/*
 * Encrypt multiple blocks of data in GCM mode.  Decrypt for GCM mode
 * is done in another function.
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}

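/*
 * For reference, a typical single-part encrypt flow through this file is
 * roughly the following (a sketch only; the surrounding framework normally
 * drives these calls, AES_BLOCK_LEN and the aes_* callbacks are assumed
 * names from the AES provider, and ctx->gcm_keysched must already point at
 * an expanded key schedule before gcm_init_ctx() is called):
 *
 *	gcm_ctx_t *ctx = gcm_alloc_ctx(kmflag);
 *	(void) gcm_init_ctx(ctx, (char *)&params, AES_BLOCK_LEN,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 *	(void) gcm_mode_encrypt_contiguous_blocks(ctx, plaintext, pt_len,
 *	    &out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
 *	    aes_xor_block);
 *	(void) gcm_encrypt_final(ctx, &out, AES_BLOCK_LEN,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 *
 * where params is a caller-supplied CK_AES_GCM_PARAMS and out is a
 * crypto_data_t describing the output buffer.
 */
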
/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}

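/*
 * The tag produced above follows the definition in NIST SP 800-38D: the
 * lengths block (len(AAD) || len(C), both in bits) is folded into GHASH,
 * the result is XORed with E(K, J0), and the leading gcm_tag_len bytes of
 * that 128-bit value are emitted after the ciphertext.
 */
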
/*
 * This will only deal with decrypting the last block of the input that
 * might not be a multiple of block length.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits.
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* authentication tag */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}

/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
		new = kmem_alloc(new_len, ctx->gcm_kmflag);
#else
		new = malloc(new_len);
#endif
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);

		/* grow the buffer, then release the old one */
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
#ifdef _KERNEL
		kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
		free(ctx->gcm_pt_buf);
#endif
		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}

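/*
 * Decryption is deferred because the last gcm_tag_len bytes of the input
 * stream are the authentication tag, and the boundary between ciphertext
 * and tag is only known once all input has been seen.  gcm_decrypt_final()
 * therefore decrypts out of gcm_pt_buf and copies plaintext to the output
 * only after the computed tag matches the received one.
 */
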
int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * not expecting anymore ciphertext, just
			 * compute plaintext for the remaining input
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}

static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}

static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}

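/*
 * The two branches above implement the J0 derivation from NIST SP 800-38D:
 * for the recommended 96-bit IV, J0 = IV || 0^31 || 1, while for any other
 * IV length J0 = GHASH(IV zero-padded to a block boundary || 64 zero bits ||
 * len(IV) in bits).  Either way J0 seeds the counter block and is encrypted
 * one more time in the final call to mask the authentication tag.
 */
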
/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}

int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

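/*
 * The PKCS#11-style parameter structures consumed above are filled in by
 * the caller before init.  A minimal sketch for the GCM case, using only
 * the fields referenced in this file and purely illustrative values:
 *
 *	CK_AES_GCM_PARAMS params;
 *
 *	params.pIv = iv_buf;		(12-byte IVs take the fast J0 path)
 *	params.ulIvLen = 12;
 *	params.pAAD = aad_buf;
 *	params.ulAADLen = aad_len;
 *	params.ulTagBits = 128;		(must be a length accepted by
 *					gcm_validate_args())
 *
 *	rv = gcm_init_ctx(gcm_ctx, (char *)&params, block_size,
 *	    encrypt_block, copy_block, xor_block);
 */
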
void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}

#ifdef __amd64
/*
 * Return 1 if executing on Intel with PCLMULQDQ instructions,
 * otherwise 0 (i.e., Intel without PCLMULQDQ or AMD).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * global variable x86_feature or the output of cpuid_insn().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
#ifdef X86_PCLMULQDQ
		cached_result = (x86_feature & X86_PCLMULQDQ) != 0;
#else
		if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
			struct cpuid_regs cpr;
			cpu_t *cp = CPU;

			cpr.cp_eax = 1; /* Function 1: get processor info */
			(void) cpuid_insn(cp, &cpr);
			cached_result = ((cpr.cp_ecx &
			    CPUID_INTC_ECX_PCLMULQDQ) != 0);
		} else {
			cached_result = 0;
		}
#endif	/* X86_PCLMULQDQ */
#else
		uint_t ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}

	return (cached_result);
}
#endif	/* __amd64 */