/*-
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
#include <sys/endian.h>
#include <sys/pcpu.h>
#if defined(__amd64__) || (defined(__i386__) && !defined(PC98))
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#include <machine/pcb.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>	/* for hmac_ipad_buffer and hmac_opad_buffer */
#include <opencrypto/xform.h>

#include <crypto/via/padlock.h>

/*
 * Implementation notes.
 *
 * Some VIA CPUs provide SHA1 and SHA256 acceleration.
 * We implement all HMAC algorithms provided by the crypto(9) framework, but
 * we do the crypto work in software unless this is HMAC/SHA1 or HMAC/SHA256
 * and our CPU can accelerate it.
 *
 * The additional CPU instructions which perform SHA1 and SHA256 are one-shot
 * functions - we have only one chance to give the data, and the CPU itself
 * will add the padding and calculate the hash automatically.
 * This means it is not possible to implement the common init(), update(),
 * final() methods directly on top of them.
 * The way I've chosen is to keep adding data to a buffer on update()
 * (reallocating the buffer if necessary) and to call the XSHA{1,256}
 * instruction on final().
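 *
 * The resulting flow for a single digest, sketched in terms of the
 * functions defined below, is:
 *
 *	padlock_sha_init(&ctx);              (psc_buf = NULL, psc_offset = 0)
 *	padlock_sha_update(&ctx, data, len); (append to psc_buf, grow if needed)
 *	padlock_sha1_final(hash, &ctx);      (one "rep xsha1" over psc_buf)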
 */

struct padlock_sha_ctx {
	uint8_t	*psc_buf;
	int	 psc_offset;
	int	 psc_size;
};
CTASSERT(sizeof(struct padlock_sha_ctx) <= sizeof(union authctx));

static void padlock_sha_init(struct padlock_sha_ctx *ctx);
static int padlock_sha_update(struct padlock_sha_ctx *ctx, uint8_t *buf,
    uint16_t bufsize);
static void padlock_sha1_final(uint8_t *hash, struct padlock_sha_ctx *ctx);
static void padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx);

static struct auth_hash padlock_hmac_sha1 = {
	CRYPTO_SHA1_HMAC, "HMAC-SHA1",
	20, SHA1_HASH_LEN, SHA1_HMAC_BLOCK_LEN, sizeof(struct padlock_sha_ctx),
	(void (*)(void *))padlock_sha_init,
	(int (*)(void *, uint8_t *, uint16_t))padlock_sha_update,
	(void (*)(uint8_t *, void *))padlock_sha1_final
};

static struct auth_hash padlock_hmac_sha256 = {
	CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256",
	32, SHA2_256_HASH_LEN, SHA2_256_HMAC_BLOCK_LEN, sizeof(struct padlock_sha_ctx),
	(void (*)(void *))padlock_sha_init,
	(int (*)(void *, uint8_t *, uint16_t))padlock_sha_update,
	(void (*)(uint8_t *, void *))padlock_sha256_final
};

MALLOC_DECLARE(M_PADLOCK);

static __inline void
padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
{

	while (count-- > 0)
		*dst++ = bswap32(*src++);
}

static void
padlock_do_sha1(const u_char *in, u_char *out, int count)
{
	u_char buf[128+16];	/* PadLock needs at least a 128-byte buffer. */
	u_char *result = PADLOCK_ALIGN(buf);

	/* Standard SHA1 initial state. */
	((uint32_t *)result)[0] = 0x67452301;
	((uint32_t *)result)[1] = 0xEFCDAB89;
	((uint32_t *)result)[2] = 0x98BADCFE;
	((uint32_t *)result)[3] = 0x10325476;
	((uint32_t *)result)[4] = 0xC3D2E1F0;

#ifdef __GNUCLIKE_ASM
	__asm __volatile(
		".byte 0xf3, 0x0f, 0xa6, 0xc8" /* rep xsha1 */
			: "+S"(in), "+D"(result)
			: "c"(count), "a"(0)
		);
#endif

	padlock_output_block((uint32_t *)result, (uint32_t *)out,
	    SHA1_HASH_LEN / sizeof(uint32_t));
}

static void
padlock_do_sha256(const char *in, char *out, int count)
{
	char buf[128+16];	/* PadLock needs at least a 128-byte buffer. */
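	/*
	 * PADLOCK_ALIGN() (defined in padlock.h) is assumed here to round
	 * the pointer up within the 16 spare bytes of buf[], so that the
	 * intermediate digest handed to XSHA256 is suitably aligned.
	 */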
	char *result = PADLOCK_ALIGN(buf);

	/* Standard SHA256 initial state. */
	((uint32_t *)result)[0] = 0x6A09E667;
	((uint32_t *)result)[1] = 0xBB67AE85;
	((uint32_t *)result)[2] = 0x3C6EF372;
	((uint32_t *)result)[3] = 0xA54FF53A;
	((uint32_t *)result)[4] = 0x510E527F;
	((uint32_t *)result)[5] = 0x9B05688C;
	((uint32_t *)result)[6] = 0x1F83D9AB;
	((uint32_t *)result)[7] = 0x5BE0CD19;

#ifdef __GNUCLIKE_ASM
	__asm __volatile(
		".byte 0xf3, 0x0f, 0xa6, 0xd0" /* rep xsha256 */
			: "+S"(in), "+D"(result)
			: "c"(count), "a"(0)
		);
#endif

	padlock_output_block((uint32_t *)result, (uint32_t *)out,
	    SHA2_256_HASH_LEN / sizeof(uint32_t));
}

static void
padlock_sha_init(struct padlock_sha_ctx *ctx)
{

	ctx->psc_buf = NULL;
	ctx->psc_offset = 0;
	ctx->psc_size = 0;
}

static int
padlock_sha_update(struct padlock_sha_ctx *ctx, uint8_t *buf, uint16_t bufsize)
{
	uint8_t *nbuf;
	int nsize;

	if (ctx->psc_size - ctx->psc_offset < bufsize) {
		/* Grow the buffer, keeping the old one if realloc() fails. */
		nsize = MAX(ctx->psc_size * 2, ctx->psc_size + bufsize);
		nbuf = realloc(ctx->psc_buf, nsize, M_PADLOCK, M_NOWAIT);
		if (nbuf == NULL)
			return (ENOMEM);
		ctx->psc_buf = nbuf;
		ctx->psc_size = nsize;
	}
	bcopy(buf, ctx->psc_buf + ctx->psc_offset, bufsize);
	ctx->psc_offset += bufsize;
	return (0);
}

static void
padlock_sha_free(struct padlock_sha_ctx *ctx)
{

	if (ctx->psc_buf != NULL) {
		//bzero(ctx->psc_buf, ctx->psc_size);
		free(ctx->psc_buf, M_PADLOCK);
		ctx->psc_buf = NULL;
		ctx->psc_offset = 0;
		ctx->psc_size = 0;
	}
}

static void
padlock_sha1_final(uint8_t *hash, struct padlock_sha_ctx *ctx)
{

	padlock_do_sha1(ctx->psc_buf, hash, ctx->psc_offset);
	padlock_sha_free(ctx);
}

static void
padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx)
{

	padlock_do_sha256(ctx->psc_buf, hash, ctx->psc_offset);
	padlock_sha_free(ctx);
}

static void
padlock_copy_ctx(struct auth_hash *axf, void *sctx, void *dctx)
{

	if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
	    (axf->type == CRYPTO_SHA1_HMAC ||
	     axf->type == CRYPTO_SHA2_256_HMAC)) {
		struct padlock_sha_ctx *spctx = sctx, *dpctx = dctx;

		dpctx->psc_offset = spctx->psc_offset;
		dpctx->psc_size = spctx->psc_size;
		dpctx->psc_buf = malloc(dpctx->psc_size, M_PADLOCK, M_WAITOK);
		bcopy(spctx->psc_buf, dpctx->psc_buf, dpctx->psc_size);
	} else {
		bcopy(sctx, dctx, axf->ctxsize);
	}
}

static void
padlock_free_ctx(struct auth_hash *axf, void *ctx)
{

	if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 &&
	    (axf->type == CRYPTO_SHA1_HMAC ||
	     axf->type == CRYPTO_SHA2_256_HMAC)) {
		padlock_sha_free(ctx);
	}
}

static void
padlock_hash_key_setup(struct padlock_session *ses, caddr_t key, int klen)
{
	struct auth_hash *axf;
	int i;

	klen /= 8;	/* Convert the key length from bits to bytes. */
	axf = ses->ses_axf;

	/*
	 * Try to free the contexts before using them, because
	 * padlock_hash_key_setup() can be called twice - once from
	 * padlock_newsession() and again from padlock_process().
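	 *
	 * The code below then precomputes the standard HMAC contexts: the
	 * key XORed with IPAD is hashed into ses_ictx, the key XORed with
	 * OPAD into ses_octx, and the key is restored afterwards.
	 * padlock_authcompute() later clones these contexts for each request.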
	 */
	padlock_free_ctx(axf, ses->ses_ictx);
	padlock_free_ctx(axf, ses->ses_octx);

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	axf->Init(ses->ses_ictx);
	axf->Update(ses->ses_ictx, key, klen);
	axf->Update(ses->ses_ictx, hmac_ipad_buffer, axf->blocksize - klen);

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	axf->Init(ses->ses_octx);
	axf->Update(ses->ses_octx, key, klen);
	axf->Update(ses->ses_octx, hmac_opad_buffer, axf->blocksize - klen);

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;
}

/*
 * Compute keyed-hash authenticator.
 */
static int
padlock_authcompute(struct padlock_session *ses, struct cryptodesc *crd,
    caddr_t buf, int flags)
{
	u_char hash[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int error;

	axf = ses->ses_axf;

	padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
	error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (error != 0) {
		padlock_free_ctx(axf, &ctx);
		return (error);
	}
	axf->Final(hash, &ctx);

	padlock_copy_ctx(axf, ses->ses_octx, &ctx);
	axf->Update(&ctx, hash, axf->hashsize);
	axf->Final(hash, &ctx);

	/* Inject the authentication data. */
	crypto_copyback(flags, buf, crd->crd_inject,
	    ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
	return (0);
}

int
padlock_hash_setup(struct padlock_session *ses, struct cryptoini *macini)
{

	ses->ses_mlen = macini->cri_mlen;

	/* Find the software structure which describes the HMAC algorithm. */
	switch (macini->cri_alg) {
	case CRYPTO_NULL_HMAC:
		ses->ses_axf = &auth_hash_null;
		break;
	case CRYPTO_MD5_HMAC:
		ses->ses_axf = &auth_hash_hmac_md5;
		break;
	case CRYPTO_SHA1_HMAC:
		if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
			ses->ses_axf = &padlock_hmac_sha1;
		else
			ses->ses_axf = &auth_hash_hmac_sha1;
		break;
	case CRYPTO_RIPEMD160_HMAC:
		ses->ses_axf = &auth_hash_hmac_ripemd_160;
		break;
	case CRYPTO_SHA2_256_HMAC:
		if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
			ses->ses_axf = &padlock_hmac_sha256;
		else
			ses->ses_axf = &auth_hash_hmac_sha2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		ses->ses_axf = &auth_hash_hmac_sha2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		ses->ses_axf = &auth_hash_hmac_sha2_512;
		break;
	}

	/* Allocate memory for the HMAC inner and outer contexts. */
	ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
	    M_ZERO | M_NOWAIT);
	ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
	    M_ZERO | M_NOWAIT);
	if (ses->ses_ictx == NULL || ses->ses_octx == NULL)
		return (ENOMEM);

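	/*
	 * A key is not required at session creation time; it may instead be
	 * supplied per request with CRD_F_KEY_EXPLICIT, in which case
	 * padlock_hash_process() calls padlock_hash_key_setup() itself.
	 */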
	/* Set up the key if one was given. */
	if (macini->cri_key != NULL) {
		padlock_hash_key_setup(ses, macini->cri_key,
		    macini->cri_klen);
	}
	return (0);
}

int
padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
    struct cryptop *crp)
{
	struct thread *td;
	int error;

	td = curthread;
	error = fpu_kern_enter(td, &ses->ses_fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		return (error);
	if ((maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
		padlock_hash_key_setup(ses, maccrd->crd_key, maccrd->crd_klen);

	error = padlock_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
	fpu_kern_leave(td, &ses->ses_fpu_ctx);
	return (error);
}

void
padlock_hash_free(struct padlock_session *ses)
{

	if (ses->ses_ictx != NULL) {
		padlock_free_ctx(ses->ses_axf, ses->ses_ictx);
		bzero(ses->ses_ictx, ses->ses_axf->ctxsize);
		free(ses->ses_ictx, M_PADLOCK);
		ses->ses_ictx = NULL;
	}
	if (ses->ses_octx != NULL) {
		padlock_free_ctx(ses->ses_axf, ses->ses_octx);
		bzero(ses->ses_octx, ses->ses_axf->ctxsize);
		free(ses->ses_octx, M_PADLOCK);
		ses->ses_octx = NULL;
	}
}