/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The basic framework for this code came from the reference
 * implementation for MD5.  That implementation is Copyright (C)
 * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
 *
 * License to copy and use this software is granted provided that it
 * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
 * Algorithm" in all material mentioning or referencing this software
 * or this function.
 *
 * License is also granted to make and use derivative works provided
 * that such works are identified as "derived from the RSA Data
 * Security, Inc. MD5 Message-Digest Algorithm" in all material
 * mentioning or referencing the derived work.
 *
 * RSA Data Security, Inc. makes no representations concerning either
 * the merchantability of this software or the suitability of this
 * software for any particular purpose. It is provided "as is"
 * without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * documentation and/or software.
 *
 * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
 * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
 * Not as fast as one would like -- further optimizations are encouraged
 * and appreciated.
 */

#if !defined(_KERNEL) && !defined(_BOOT)
#include <stdint.h>
#include <strings.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/systeminfo.h>
#endif	/* !_KERNEL && !_BOOT */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/sha1.h>
#include <sys/sha1_consts.h>

#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
#define	HAVE_HTONL
#endif

#ifdef	_BOOT
#define	bcopy(_s, _d, _l)	((void) memcpy((_d), (_s), (_l)))
#define	bzero(_m, _l)		((void) memset((_m), 0, (_l)))
#endif

static void Encode(uint8_t *, const uint32_t *, size_t);

#if	defined(__sparc)

#define	SHA1_TRANSFORM(ctx, in) \
	SHA1Transform((ctx)->state[0], (ctx)->state[1], (ctx)->state[2], \
	    (ctx)->state[3], (ctx)->state[4], (ctx), (in))

static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
    SHA1_CTX *, const uint8_t *);

#elif	defined(__amd64)

#define	SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
#define	SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
		(in), (num))

void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);

#else

#define	SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))

static void SHA1Transform(SHA1_CTX *, const uint8_t *);

#endif


static uint8_t PADDING[64] = { 0x80, /* all zeros */ };

/*
 * F, G, and H are the basic SHA1 functions.
 */
#define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
#define	G(b, c, d)	((b) ^ (c) ^ (d))
#define	H(b, c, d)	(((b) & (c)) | (((b)|(c)) & (d)))

/*
 * ROTATE_LEFT rotates x left n bits.
 */
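/*
 * Worked example (illustrative): ROTATE_LEFT(0x80000001, 1) yields
 * 0x00000003 -- the high bit wraps around into bit 0, so no bits are
 * lost, unlike a plain shift.
 */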
#if	defined(__GNUC__) && defined(_LP64)
static __inline__ uint64_t
ROTATE_LEFT(uint64_t value, uint32_t n)
{
	uint32_t t32;

	t32 = (uint32_t)value;
	return ((t32 << n) | (t32 >> (32 - n)));
}

#else

#define	ROTATE_LEFT(x, n)	\
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))

#endif


/*
 * SHA1Init()
 *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 *   input: SHA1_CTX *	: the context to initialize.
 *  output: void
 */

void
SHA1Init(SHA1_CTX *ctx)
{
	ctx->count[0] = ctx->count[1] = 0;

	/*
	 * load magic initialization constants. Tell lint
	 * that these constants are unsigned by using U.
	 */

	ctx->state[0] = 0x67452301U;
	ctx->state[1] = 0xefcdab89U;
	ctx->state[2] = 0x98badcfeU;
	ctx->state[3] = 0x10325476U;
	ctx->state[4] = 0xc3d2e1f0U;
}

#ifdef VIS_SHA1
#ifdef _KERNEL

#include <sys/regset.h>
#include <sys/vis.h>
#include <sys/fpu/fpusystm.h>

/* the alignment for block stores to save fp registers */
#define	VIS_ALIGN	(64)

extern int sha1_savefp(kfpu_t *, int);
extern void sha1_restorefp(kfpu_t *);

uint32_t	vis_sha1_svfp_threshold = 128;

#endif /* _KERNEL */

/*
 * VIS SHA-1 consts.
 */
static uint64_t VIS[] = {
	0x8000000080000000ULL,
	0x0002000200020002ULL,
	0x5a8279996ed9eba1ULL,
	0x8f1bbcdcca62c1d6ULL,
	0x012389ab456789abULL};

extern void SHA1TransformVIS(uint64_t *, uint32_t *, uint32_t *, uint64_t *);


/*
 * SHA1Update()
 *
 * purpose: continues an sha1 digest operation, using the message block
 *          to update the context.
 *   input: SHA1_CTX *	: the context to update
 *          void *	: the message block
 *          size_t	: the length of the message block in bytes
 *  output: void
 */

void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	uint64_t X0[40], input64[8];
	const uint8_t *input = inptr;
#ifdef _KERNEL
	int usevis = 0;
#else
	int usevis = 1;
#endif /* _KERNEL */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		kfpu_t *fpu;
		if (fpu_exists) {
			uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
			uint32_t len = (input_len + buf_index) & ~0x3f;
			int svfp_ok;

			fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
			svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
			usevis = fpu_exists && sha1_savefp(fpu, svfp_ok);
		} else {
			usevis = 0;
		}
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */
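		/*
		 * Worked example (illustrative): if 16 bytes are already
		 * buffered, buf_index == 16 and buf_len == 48, so the
		 * first 48 bytes of `input' complete the 64-byte block
		 * and the main loop below resumes at i == buf_len.
		 */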
		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    ctx->buf_un.buf32,
				    &ctx->state[0], VIS);
			} else {
				SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing.  This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation.
		 * This improves performance by up to around 1.37X, compared
		 * to an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 * void SHA1TransformVIS(
		 *	uint64_t *, // Pointer to MS for ith block
		 *	uint32_t *, // Pointer to ith block of message data
		 *	uint32_t *, // Pointer to SHA state i.e. ctx->state
		 *	uint64_t *, // Pointer to various VIS constants
		 * )
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data, but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (!IS_P2ALIGNED(&input[i], sizeof (uint32_t))) {
				/*
				 * Main processing loop - input misaligned
				 */
				for (; i + 63 < input_len; i += 64) {
					bcopy(&input[i], input64, 64);
					SHA1TransformVIS(X0,
					    (uint32_t *)input64,
					    &ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input 4-byte aligned
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    /* LINTED E_BAD_PTR_CAST_ALIGN */
					    (uint32_t *)&input[i], /* CSTYLED */
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			for (; i + 63 < input_len; i += 64) {
				SHA1_TRANSFORM(ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

#else	/* VIS_SHA1 */

void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	const uint8_t *input = inptr;
#if defined(__amd64)
	uint32_t	block_count;
#endif	/* __amd64 */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */
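		/*
		 * Note (descriptive): on amd64 builds of this path, the
		 * per-block loop below collapses into a single
		 * SHA1_TRANSFORM_BLOCKS() call, which hands all remaining
		 * complete 64-byte blocks to sha1_block_data_order() at
		 * once instead of making one call per block.
		 */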
		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			i = buf_len;
		}

#if !defined(__amd64)
		for (; i + 63 < input_len; i += 64)
			SHA1_TRANSFORM(ctx, &input[i]);
#else
		block_count = (input_len - i) >> 6;
		if (block_count > 0) {
			SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
			i += block_count << 6;
		}
#endif	/* !__amd64 */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

#endif	/* VIS_SHA1 */

/*
 * SHA1Final()
 *
 * purpose: ends an sha1 digest operation, finalizing the message digest and
 *          zeroing the context.
 *   input: uchar_t *	: a buffer to store the digest.
 *			: The function actually uses void* because many
 *			: callers pass things other than uchar_t here.
 *          SHA1_CTX *	: the context to finalize, save, and zero
 *  output: void
 */

void
SHA1Final(void *digest, SHA1_CTX *ctx)
{
	uint8_t		bitcount_be[sizeof (ctx->count)];
	uint32_t	index = (ctx->count[1] >> 3) & 0x3f;

	/* store bit count, big endian */
	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));

	/* pad out to 56 mod 64 */
	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);

	/* append length (before padding) */
	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));

	/* store state in digest */
	Encode(digest, ctx->state, sizeof (ctx->state));

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}


#if !defined(__amd64)

typedef uint32_t sha1word;

/*
 * sparc optimization:
 *
 * on the sparc, we can load big endian 32-bit data easily.  note that
 * special care must be taken to ensure the address is 32-bit aligned.
 * in the interest of speed, we don't check to make sure, since
 * careful programming can guarantee this for us.
 */

#if	defined(_BIG_ENDIAN)
#define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))

#elif	defined(HAVE_HTONL)
#define	LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))

#else
/* little endian -- will work on big endian, but slowly */
#define	LOAD_BIG_32(addr)	\
	(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
#endif	/* _BIG_ENDIAN */

/*
 * SHA1Transform()
 */
#if	defined(W_ARRAY)
#define	W(n) w[n]
#else	/* !defined(W_ARRAY) */
#define	W(n) w_ ## n
#endif	/* !defined(W_ARRAY) */


#if	defined(__sparc)

/*
 * sparc register window optimization:
 *
 * `a', `b', `c', `d', and `e' are passed into SHA1Transform
 * explicitly since it increases the number of registers available to
 * the compiler.  under this scheme, these variables can be held in
 * %i0 - %i4, which leaves more local and out registers available.
 *
 * purpose: sha1 transformation -- updates the digest based on `block'
 *   input: uint32_t	: bytes  1 -  4 of the digest
 *          uint32_t	: bytes  5 -  8 of the digest
 *          uint32_t	: bytes  9 - 12 of the digest
 *          uint32_t	: bytes 13 - 16 of the digest
 *          uint32_t	: bytes 17 - 20 of the digest
 *          SHA1_CTX *	: the context to update
 *          uint8_t [64]: the block to use to update the digest
 *  output: void
 */

void
SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
    SHA1_CTX *ctx, const uint8_t blk[64])
{
	/*
	 * sparc optimization:
	 *
	 * while it is somewhat counter-intuitive, on sparc, it is
	 * more efficient to place all the constants used in this
	 * function in an array and load the values out of the array
	 * than to manually load the constants.  this is because
	 * setting a register to a 32-bit value takes two ops in most
	 * cases: a `sethi' and an `or', but loading a 32-bit value
	 * from memory only takes one `ld' (or `lduw' on v9).  while
	 * this increases memory usage, the compiler can find enough
	 * other things to do while waiting so that the pipeline does
	 * not stall.  additionally, it is likely that many of these
	 * constants are cached so that later accesses do not even go
	 * out to the bus.
	 *
	 * this array is declared `static' to keep the compiler from
	 * having to bcopy() this array onto the stack frame of
	 * SHA1Transform() each time it is called -- which is
	 * unacceptably expensive.
	 *
	 * the `const' is to ensure that callers are good citizens and
	 * do not try to munge the array.  since these routines are
	 * going to be called from inside multithreaded kernelland,
	 * this is a good safety check; `sha1_consts' will end up in
	 * .rodata.
	 *
	 * unfortunately, loading from an array in this manner hurts
	 * performance under Intel.  So, there is a macro,
	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
	 * a reference to this array, or to the actual constant,
	 * depending on what platform this code is compiled for.
	 */

	static const uint32_t sha1_consts[] = {
		SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3
	};

	/*
	 * general optimization:
	 *
	 * use individual integers instead of using an array.  this is a
	 * win, although the amount it wins by seems to vary quite a bit.
	 */

	uint32_t	w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	uint32_t	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;

	/*
	 * sparc optimization:
	 *
	 * if `block' is already aligned on a 4-byte boundary, use
	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
	 * buffer that *is* aligned on a 4-byte boundary and then do
	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
	 * that using the bcopy() is better than loading the bytes
	 * individually and doing the endian-swap by hand.
	 *
	 * even though it's quite tempting to do:
	 *
	 *	bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
	 *	blk = (uint8_t *)ctx->buf_un.buf32;
	 *
	 * and only have one set of LOAD_BIG_32()'s, the compiler
	 * *does not* like that, so please resist the urge.
	 */

	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
		bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
		w_9 = LOAD_BIG_32(ctx->buf_un.buf32 + 9);
		w_8 = LOAD_BIG_32(ctx->buf_un.buf32 + 8);
		w_7 = LOAD_BIG_32(ctx->buf_un.buf32 + 7);
		w_6 = LOAD_BIG_32(ctx->buf_un.buf32 + 6);
		w_5 = LOAD_BIG_32(ctx->buf_un.buf32 + 5);
		w_4 = LOAD_BIG_32(ctx->buf_un.buf32 + 4);
		w_3 = LOAD_BIG_32(ctx->buf_un.buf32 + 3);
		w_2 = LOAD_BIG_32(ctx->buf_un.buf32 + 2);
		w_1 = LOAD_BIG_32(ctx->buf_un.buf32 + 1);
		w_0 = LOAD_BIG_32(ctx->buf_un.buf32 + 0);
	} else {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_15 = LOAD_BIG_32(blk + 60);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_14 = LOAD_BIG_32(blk + 56);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_13 = LOAD_BIG_32(blk + 52);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_12 = LOAD_BIG_32(blk + 48);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_11 = LOAD_BIG_32(blk + 44);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_10 = LOAD_BIG_32(blk + 40);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_9 = LOAD_BIG_32(blk + 36);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_8 = LOAD_BIG_32(blk + 32);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_7 = LOAD_BIG_32(blk + 28);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_6 = LOAD_BIG_32(blk + 24);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_5 = LOAD_BIG_32(blk + 20);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_4 = LOAD_BIG_32(blk + 16);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_3 = LOAD_BIG_32(blk + 12);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_2 = LOAD_BIG_32(blk + 8);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_1 = LOAD_BIG_32(blk + 4);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_0 = LOAD_BIG_32(blk + 0);
	}
#else	/* !defined(__sparc) */

void /* CSTYLED */
SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
{
	/* CSTYLED */
	sha1word a = ctx->state[0];
	sha1word b = ctx->state[1];
	sha1word c = ctx->state[2];
	sha1word d = ctx->state[3];
	sha1word e = ctx->state[4];

#if	defined(W_ARRAY)
	sha1word	w[16];
#else	/* !defined(W_ARRAY) */
	sha1word	w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	sha1word	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
#endif	/* !defined(W_ARRAY) */

	W(0) = LOAD_BIG_32((void *)(blk + 0));
	W(1) = LOAD_BIG_32((void *)(blk + 4));
	W(2) = LOAD_BIG_32((void *)(blk + 8));
	W(3) = LOAD_BIG_32((void *)(blk + 12));
	W(4) = LOAD_BIG_32((void *)(blk + 16));
	W(5) = LOAD_BIG_32((void *)(blk + 20));
	W(6) = LOAD_BIG_32((void *)(blk + 24));
	W(7) = LOAD_BIG_32((void *)(blk + 28));
	W(8) = LOAD_BIG_32((void *)(blk + 32));
	W(9) = LOAD_BIG_32((void *)(blk + 36));
	W(10) = LOAD_BIG_32((void *)(blk + 40));
	W(11) = LOAD_BIG_32((void *)(blk + 44));
	W(12) = LOAD_BIG_32((void *)(blk + 48));
	W(13) = LOAD_BIG_32((void *)(blk + 52));
	W(14) = LOAD_BIG_32((void *)(blk + 56));
	W(15) = LOAD_BIG_32((void *)(blk + 60));

#endif	/* !defined(__sparc) */

	/*
	 * general optimization:
	 *
	 * even though this approach is described in the standard as
	 * being slower algorithmically, it is 30-40% faster than the
	 * "faster" version under SPARC, because this version has more
	 * of the constraints specified at compile-time and uses fewer
	 * variables (and therefore has better register utilization)
	 * than its "speedier" brother.  (i've tried both, trust me)
	 *
	 * for either method given in the spec, there is an "assignment"
	 * phase where the following takes place:
	 *
	 *	tmp = (main_computation);
	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
	 *
	 * we can make the algorithm go faster by not doing this work,
	 * but just pretending that `d' is now `e', etc.  this works
	 * really well and obviates the need for a temporary variable.
	 * however, we still explicitly perform the rotate action,
	 * since it is cheaper on SPARC to do it once than to have to
	 * do it over and over again.
	 */
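	/*
	 * Message schedule note (descriptive): from step 16 onward, the
	 * code below applies the FIPS 180-1 recurrence
	 *
	 *	W(t) = ROTATE_LEFT(W(t-3) ^ W(t-8) ^ W(t-14) ^ W(t-16), 1)
	 *
	 * with all indices taken mod 16, so the 16-word window is
	 * recomputed in place rather than materializing all 80 words.
	 * For example, step 16 computes
	 * W(0) = ROTATE_LEFT(W(13) ^ W(8) ^ W(2) ^ W(0), 1).
	 */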
	/* round 1 */
	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0);	/* 0 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0);	/* 1 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0);	/* 2 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0);	/* 3 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0);	/* 4 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0);	/* 5 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0);	/* 6 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0);	/* 7 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0);	/* 8 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0);	/* 9 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */
	b = ROTATE_LEFT(b, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 16 */
	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
	a = ROTATE_LEFT(a, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 17 */
	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
	e = ROTATE_LEFT(e, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 18 */
	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
	d = ROTATE_LEFT(d, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 19 */
	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
	c = ROTATE_LEFT(c, 30);

	/* round 2 */
	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 20 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 21 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 22 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);
	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 23 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 24 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 25 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 26 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 27 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 28 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 29 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 30 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 31 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 32 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 33 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 34 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 35 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 36 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 37 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 38 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 39 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	/* round 3 */
	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 40 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 41 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 42 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 43 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);
	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 44 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 45 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 46 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 47 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 48 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 49 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 50 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 51 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 52 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 53 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 54 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 55 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 56 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 57 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 58 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 59 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	/* round 4 */
	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 60 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 61 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 62 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 63 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 64 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);
	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 65 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 66 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 67 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 68 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 69 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 70 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 71 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 72 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 73 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 74 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 75 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 76 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 77 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 78 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 79 */

	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
	    SHA1_CONST(3);
	ctx->state[1] += b;
	ctx->state[2] += ROTATE_LEFT(c, 30);
	ctx->state[3] += d;
	ctx->state[4] += e;

	/* zeroize sensitive information */
	W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
	W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
}
#endif	/* !__amd64 */


/*
 * Encode()
 *
 * purpose: to store a list of 32-bit words in big-endian byte order
 *   input: uint8_t *	: place to store the converted big endian numbers
 *          uint32_t *	: place to get numbers to convert from
 *          size_t	: the length of the input in bytes
 *  output: void
 */

static void
Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
    size_t len)
{
	size_t		i, j;

#if	defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		for (i = 0, j = 0; j < len; i++, j += 4) {
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			*((uint32_t *)(output + j)) = input[i];
		}
	} else {
#endif	/* little endian -- will work on big endian, but slowly */
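	/*
	 * Byte-order example (illustrative): input[i] == 0x01020304 is
	 * stored most-significant byte first, so output[j..j+3] becomes
	 * 0x01 0x02 0x03 0x04 regardless of host endianness.
	 */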
	for (i = 0, j = 0; j < len; i++, j += 4) {
		output[j]	= (input[i] >> 24) & 0xff;
		output[j + 1]	= (input[i] >> 16) & 0xff;
		output[j + 2]	= (input[i] >> 8) & 0xff;
		output[j + 3]	= input[i] & 0xff;
	}
#if	defined(__sparc)
	}
#endif
}
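
/*
 * Example usage (illustrative sketch; the 20-byte digest size matches
 * sizeof (ctx->state), and the test vector is from FIPS 180-1):
 *
 *	SHA1_CTX ctx;
 *	uint8_t digest[20];
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, "abc", 3);
 *	SHA1Final(digest, &ctx);
 *
 * For the three-byte message "abc" this produces the well-known digest
 * a9993e364706816aba3e25717850c26c9cd0d89d.
 */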