1 /*
2  * Copyright (c) 2003, PADL Software Pty Ltd.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of PADL Software nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "gsskrb5_locl.h"
34 
35 /*
36  * Implementation of RFC 4121
37  */
38 
39 #define CFXSentByAcceptor	(1 << 0)
40 #define CFXSealed		(1 << 1)
41 #define CFXAcceptorSubkey	(1 << 2)
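
/*
 * For reference, the 16-byte Wrap token header that the routines below
 * build and parse (gss_cfx_wrap_token_desc, RFC 4121 section 4.2.6.2)
 * is laid out as:
 *
 *   octets  0..1   TOK_ID   0x05 0x04
 *   octet   2      Flags    a combination of the CFX* bits above
 *   octet   3      Filler   0xFF
 *   octets  4..5   EC       "extra count", big-endian
 *   octets  6..7   RRC      "right rotation count", big-endian
 *   octets  8..15  SND_SEQ  64-bit sequence number, big-endian
 */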
42 
43 krb5_error_code
44 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
45 			    krb5_crypto crypto,
46 			    int conf_req_flag,
47 			    int dce_style,
48 			    size_t input_length,
49 			    size_t *output_length,
50 			    size_t *cksumsize,
51 			    uint16_t *padlength)
52 {
53     krb5_error_code ret;
54     krb5_cksumtype type;
55 
56     /* 16-byte header is always first */
57     *output_length = sizeof(gss_cfx_wrap_token_desc);
58     *padlength = 0;
59 
60     ret = krb5_crypto_get_checksum_type(context, crypto, &type);
61     if (ret)
62 	return ret;
63 
64     ret = krb5_checksumsize(context, type, cksumsize);
65     if (ret)
66 	return ret;
67 
68     if (conf_req_flag) {
69 	size_t padsize;
70 
71 	/* Header is concatenated with data before encryption */
72 	input_length += sizeof(gss_cfx_wrap_token_desc);
73 
74 	if (dce_style) {
75 		ret = krb5_crypto_getblocksize(context, crypto, &padsize);
76 	} else {
77 		ret = krb5_crypto_getpadsize(context, crypto, &padsize);
78 	}
79 	if (ret) {
80 	    return ret;
81 	}
82 	if (padsize > 1) {
83 	    /* XXX check this */
84 	    *padlength = padsize - (input_length % padsize);
85 
86 	    /* We add the pad ourselves (noted here for completeness only) */
87 	    input_length += *padlength;
88 	}
89 
90 	*output_length += krb5_get_wrapped_length(context,
91 						  crypto, input_length);
92     } else {
93 	/* Checksum is concatenated with data */
94 	*output_length += input_length + *cksumsize;
95     }
96 
97     assert(*output_length > input_length);
98 
99     return 0;
100 }
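
/*
 * A worked example of the computation above, assuming (purely for
 * illustration) a 12-byte checksum such as HMAC-SHA1-96: wrapping 100
 * bytes without confidentiality gives *output_length = 16 (token
 * header) + 100 (data) + 12 (checksum) = 128, with *padlength = 0.
 */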
101 
102 OM_uint32
103 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
104 		      const gsskrb5_ctx ctx,
105 		      krb5_context context,
106 		      int conf_req_flag,
107 		      gss_qop_t qop_req,
108 		      OM_uint32 req_output_size,
109 		      OM_uint32 *max_input_size)
110 {
111     krb5_error_code ret;
112 
113     *max_input_size = 0;
114 
115     /* 16-byte header is always first */
116     if (req_output_size < 16)
117 	return 0;
118     req_output_size -= 16;
119 
120     if (conf_req_flag) {
121 	size_t wrapped_size, sz;
122 
123 	wrapped_size = req_output_size + 1;
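	/*
	 * Search downward for the largest payload whose wrapped size
	 * still fits in req_output_size; there is no inverse of
	 * krb5_get_wrapped_length(), so probe it directly.
	 */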
124 	do {
125 	    wrapped_size--;
126 	    sz = krb5_get_wrapped_length(context,
127 					 ctx->crypto, wrapped_size);
128 	} while (wrapped_size && sz > req_output_size);
129 	if (wrapped_size == 0)
130 	    return 0;
131 
132 	/* inner header */
133 	if (wrapped_size < 16)
134 	    return 0;
135 
136 	wrapped_size -= 16;
137 
138 	*max_input_size = wrapped_size;
139     } else {
140 	krb5_cksumtype type;
141 	size_t cksumsize;
142 
143 	ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
	if (ret) {
	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}

	ret = krb5_checksumsize(context, type, &cksumsize);
	if (ret) {
	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}
150 
151 	if (req_output_size < cksumsize)
152 	    return 0;
153 
154 	/* Checksum is concatenated with data */
155 	*max_input_size = req_output_size - cksumsize;
156     }
157 
158     return 0;
159 }
160 
161 /*
162  * Rotate "rrc" bytes to the front or back
163  */
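
/*
 * Example (illustrative only): with len = 8 and rrc = 2, rotating
 * "ABCDEFGH" with unrotate = FALSE moves the last two bytes to the
 * front, giving "GHABCDEF"; calling it again with unrotate = TRUE
 * restores the original order.
 */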
164 
165 static krb5_error_code
166 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
167 {
168     u_char *tmp, buf[256];
169     size_t left;
170 
171     if (len == 0)
172 	return 0;
173 
174     rrc %= len;
175 
176     if (rrc == 0)
177 	return 0;
178 
179     left = len - rrc;
180 
181     if (rrc <= sizeof(buf)) {
182 	tmp = buf;
183     } else {
184 	tmp = malloc(rrc);
185 	if (tmp == NULL)
186 	    return ENOMEM;
187     }
188 
189     if (unrotate) {
190 	memcpy(tmp, data, rrc);
191 	memmove(data, (u_char *)data + rrc, left);
192 	memcpy((u_char *)data + left, tmp, rrc);
193     } else {
194 	memcpy(tmp, (u_char *)data + left, rrc);
195 	memmove((u_char *)data + rrc, data, left);
196 	memcpy(data, tmp, rrc);
197     }
198 
199     if (rrc > sizeof(buf))
200 	free(tmp);
201 
202     return 0;
203 }
204 
205 gss_iov_buffer_desc *
206 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
207 {
208     int i;
209 
210     for (i = 0; i < iov_count; i++)
211 	if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
212 	    return &iov[i];
213     return NULL;
214 }
215 
216 OM_uint32
217 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
218 {
219     if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
220 	if (buffer->buffer.length == size)
221 	    return GSS_S_COMPLETE;
222 	free(buffer->buffer.value);
223     }
224 
    buffer->buffer.value = malloc(size);
    if (buffer->buffer.value == NULL) {
	buffer->buffer.length = 0;
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    buffer->buffer.length = size;
231     buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
232 
233     return GSS_S_COMPLETE;
234 }
235 
236 
237 OM_uint32
238 _gk_verify_buffers(OM_uint32 *minor_status,
239 		   const gsskrb5_ctx ctx,
240 		   const gss_iov_buffer_desc *header,
241 		   const gss_iov_buffer_desc *padding,
242 		   const gss_iov_buffer_desc *trailer)
243 {
244     if (header == NULL) {
245 	*minor_status = EINVAL;
246 	return GSS_S_FAILURE;
247     }
248 
249     if (IS_DCE_STYLE(ctx)) {
250 	/*
251 	 * In DCE style mode we reject having a padding or trailer buffer
252 	 */
253 	if (padding) {
254 	    *minor_status = EINVAL;
255 	    return GSS_S_FAILURE;
256 	}
257 	if (trailer) {
258 	    *minor_status = EINVAL;
259 	    return GSS_S_FAILURE;
260 	}
261     } else {
262 	/*
263 	 * In non-DCE style mode we require having a padding buffer
264 	 */
265 	if (padding == NULL) {
266 	    *minor_status = EINVAL;
267 	    return GSS_S_FAILURE;
268 	}
269     }
270 
271     *minor_status = 0;
272     return GSS_S_COMPLETE;
273 }
274 
275 #if 0
276 OM_uint32
277 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
278 		     gsskrb5_ctx ctx,
279 		     krb5_context context,
280 		     int conf_req_flag,
281 		     int *conf_state,
282 		     gss_iov_buffer_desc *iov,
283 		     int iov_count)
284 {
285     OM_uint32 major_status, junk;
286     gss_iov_buffer_desc *header, *trailer, *padding;
287     size_t gsshsize, k5hsize;
288     size_t gsstsize, k5tsize;
289     size_t rrc = 0, ec = 0;
290     int i;
291     gss_cfx_wrap_token token;
292     krb5_error_code ret;
293     int32_t seq_number;
294     unsigned usage;
295     krb5_crypto_iov *data = NULL;
296 
297     header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
298     if (header == NULL) {
299 	*minor_status = EINVAL;
300 	return GSS_S_FAILURE;
301     }
302 
303     padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
304     if (padding != NULL) {
305 	padding->buffer.length = 0;
306     }
307 
308     trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
309 
310     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
311     if (major_status != GSS_S_COMPLETE) {
312 	    return major_status;
313     }
314 
315     if (conf_req_flag) {
316 	size_t k5psize = 0;
317 	size_t k5pbase = 0;
318 	size_t k5bsize = 0;
319 	size_t size = 0;
320 
321 	for (i = 0; i < iov_count; i++) {
322 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
323 	    case GSS_IOV_BUFFER_TYPE_DATA:
324 		size += iov[i].buffer.length;
325 		break;
326 	    default:
327 		break;
328 	    }
329 	}
330 
331 	size += sizeof(gss_cfx_wrap_token_desc);
332 
333 	*minor_status = krb5_crypto_length(context, ctx->crypto,
334 					   KRB5_CRYPTO_TYPE_HEADER,
335 					   &k5hsize);
336 	if (*minor_status)
337 	    return GSS_S_FAILURE;
338 
339 	*minor_status = krb5_crypto_length(context, ctx->crypto,
340 					   KRB5_CRYPTO_TYPE_TRAILER,
341 					   &k5tsize);
342 	if (*minor_status)
343 	    return GSS_S_FAILURE;
344 
345 	*minor_status = krb5_crypto_length(context, ctx->crypto,
346 					   KRB5_CRYPTO_TYPE_PADDING,
347 					   &k5pbase);
348 	if (*minor_status)
349 	    return GSS_S_FAILURE;
350 
351 	if (k5pbase > 1) {
352 	    k5psize = k5pbase - (size % k5pbase);
353 	} else {
354 	    k5psize = 0;
355 	}
356 
357 	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
358 	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
359 						     &k5bsize);
360 	    if (*minor_status)
361 		return GSS_S_FAILURE;
362 	    ec = k5bsize;
363 	} else {
364 	    ec = k5psize;
365 	}
366 
367 	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
368 	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
369     } else {
370 	if (IS_DCE_STYLE(ctx)) {
371 	    *minor_status = EINVAL;
372 	    return GSS_S_FAILURE;
373 	}
374 
375 	k5hsize = 0;
376 	*minor_status = krb5_crypto_length(context, ctx->crypto,
377 					   KRB5_CRYPTO_TYPE_CHECKSUM,
378 					   &k5tsize);
379 	if (*minor_status)
380 	    return GSS_S_FAILURE;
381 
382 	gsshsize = sizeof(gss_cfx_wrap_token_desc);
383 	gsstsize = k5tsize;
384     }
385 
386     /*
387      *
388      */
389 
390     if (trailer == NULL) {
391 	rrc = gsstsize;
392 	if (IS_DCE_STYLE(ctx))
393 	    rrc -= ec;
394 	gsshsize += gsstsize;
395 	gsstsize = 0;
396     } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
397 	major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
398 	if (major_status)
399 	    goto failure;
400     } else if (trailer->buffer.length < gsstsize) {
401 	*minor_status = KRB5_BAD_MSIZE;
402 	major_status = GSS_S_FAILURE;
403 	goto failure;
404     } else
405 	trailer->buffer.length = gsstsize;
406 
407     /*
408      *
409      */
410 
411     if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
412 	major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
413 	if (major_status != GSS_S_COMPLETE)
414 	    goto failure;
415     } else if (header->buffer.length < gsshsize) {
416 	*minor_status = KRB5_BAD_MSIZE;
417 	major_status = GSS_S_FAILURE;
418 	goto failure;
419     } else
420 	header->buffer.length = gsshsize;
421 
422     token = (gss_cfx_wrap_token)header->buffer.value;
423 
424     token->TOK_ID[0] = 0x05;
425     token->TOK_ID[1] = 0x04;
426     token->Flags     = 0;
427     token->Filler    = 0xFF;
428 
429     if ((ctx->more_flags & LOCAL) == 0)
430 	token->Flags |= CFXSentByAcceptor;
431 
432     if (ctx->more_flags & ACCEPTOR_SUBKEY)
433 	token->Flags |= CFXAcceptorSubkey;
434 
435     if (ctx->more_flags & LOCAL)
436 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
437     else
438 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
439 
440     if (conf_req_flag) {
441 	/*
442 	 * In Wrap tokens with confidentiality, the EC field is
443 	 * used to encode the size (in bytes) of the random filler.
444 	 */
445 	token->Flags |= CFXSealed;
446 	token->EC[0] = (ec >> 8) & 0xFF;
447 	token->EC[1] = (ec >> 0) & 0xFF;
448 
449     } else {
450 	/*
451 	 * In Wrap tokens without confidentiality, the EC field is
452 	 * used to encode the size (in bytes) of the trailing
453 	 * checksum.
454 	 *
	 * This is not used in the checksum calculation itself,
456 	 * because the checksum length could potentially vary
457 	 * depending on the data length.
458 	 */
459 	token->EC[0] = 0;
460 	token->EC[1] = 0;
461     }
462 
463     /*
464      * In Wrap tokens that provide for confidentiality, the RRC
465      * field in the header contains the hex value 00 00 before
466      * encryption.
467      *
468      * In Wrap tokens that do not provide for confidentiality,
469      * both the EC and RRC fields in the appended checksum
470      * contain the hex value 00 00 for the purpose of calculating
471      * the checksum.
472      */
473     token->RRC[0] = 0;
474     token->RRC[1] = 0;
475 
476     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
477     krb5_auth_con_getlocalseqnumber(context,
478 				    ctx->auth_context,
479 				    &seq_number);
480     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
481     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
482     krb5_auth_con_setlocalseqnumber(context,
483 				    ctx->auth_context,
484 				    ++seq_number);
485     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
486 
487     data = calloc(iov_count + 3, sizeof(data[0]));
488     if (data == NULL) {
489 	*minor_status = ENOMEM;
490 	major_status = GSS_S_FAILURE;
491 	goto failure;
492     }
493 
494     if (conf_req_flag) {
495 	/*
496 	  plain packet:
497 
498 	  {"header" | encrypt(plaintext-data | ec-padding | E"header")}
499 
500 	  Expanded, this is with with RRC = 0:
	  Expanded, this is what it looks like with RRC = 0:
502 	  {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
503 
504 	  In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
505 
506 	  {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data  }
507 	 */
508 
509 	i = 0;
510 	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
511 	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
512 	data[i].data.length = k5hsize;
513 
514 	for (i = 1; i < iov_count + 1; i++) {
515 	    switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
516 	    case GSS_IOV_BUFFER_TYPE_DATA:
517 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
518 		break;
519 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
520 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
521 		break;
522 	    default:
523 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
524 		break;
525 	    }
526 	    data[i].data.length = iov[i - 1].buffer.length;
527 	    data[i].data.data = iov[i - 1].buffer.value;
528 	}
529 
530 	/*
531 	 * Any necessary padding is added here to ensure that the
532 	 * encrypted token header is always at the end of the
533 	 * ciphertext.
534 	 */
535 
	/* The encrypted CFX header goes into the trailer (or right after
	   the header if in DCE mode); copy the token header into E"header".
	*/
539 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
540 	if (trailer)
541 	    data[i].data.data = trailer->buffer.value;
542 	else
543 	    data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);
544 
545 	data[i].data.length = ec + sizeof(*token);
546 	memset(data[i].data.data, 0xFF, ec);
547 	memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
548 	i++;
549 
550 	/* Kerberos trailer comes after the gss trailer */
551 	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
552 	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
553 	data[i].data.length = k5tsize;
554 	i++;
555 
556 	ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
557 	if (ret != 0) {
558 	    *minor_status = ret;
559 	    major_status = GSS_S_FAILURE;
560 	    goto failure;
561 	}
562 
563 	if (rrc) {
564 	    token->RRC[0] = (rrc >> 8) & 0xFF;
565 	    token->RRC[1] = (rrc >> 0) & 0xFF;
566 	}
567 
568     } else {
569 	/*
570 	  plain packet:
571 
	  {data | "header" | gss-trailer (krb5 checksum)}

	  RRC != 0 is not used in this case
575 
576 	 */
577 
578 	for (i = 0; i < iov_count; i++) {
579 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
580 	    case GSS_IOV_BUFFER_TYPE_DATA:
581 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
582 		break;
583 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
584 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
585 		break;
586 	    default:
587 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
588 		break;
589 	    }
590 	    data[i].data.length = iov[i].buffer.length;
591 	    data[i].data.data = iov[i].buffer.value;
592 	}
593 
594 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
595 	data[i].data.data = header->buffer.value;
596 	data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
597 	i++;
598 
599 	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
600 	if (trailer) {
601 		data[i].data.data = trailer->buffer.value;
602 	} else {
603 		data[i].data.data = (uint8_t *)header->buffer.value +
604 				     sizeof(gss_cfx_wrap_token_desc);
605 	}
606 	data[i].data.length = k5tsize;
607 	i++;
608 
609 	ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
610 	if (ret) {
611 	    *minor_status = ret;
612 	    major_status = GSS_S_FAILURE;
613 	    goto failure;
614 	}
615 
616 	if (rrc) {
617 	    token->RRC[0] = (rrc >> 8) & 0xFF;
618 	    token->RRC[1] = (rrc >> 0) & 0xFF;
619 	}
620 
621 	token->EC[0] =  (k5tsize >> 8) & 0xFF;
622 	token->EC[1] =  (k5tsize >> 0) & 0xFF;
623     }
624 
625     if (conf_state != NULL)
626 	*conf_state = conf_req_flag;
627 
628     free(data);
629 
630     *minor_status = 0;
631     return GSS_S_COMPLETE;
632 
633  failure:
634     if (data)
635 	free(data);
636 
637     gss_release_iov_buffer(&junk, iov, iov_count);
638 
639     return major_status;
640 }
641 #endif
642 
/* This is the slow path */
644 static OM_uint32
645 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
646 {
647     uint8_t *p, *q;
648     size_t len = 0, skip;
649     int i;
650 
651     for (i = 0; i < iov_count; i++)
652 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
653 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
654 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
655 	    len += iov[i].buffer.length;
656 
657     p = malloc(len);
658     if (p == NULL) {
659 	*minor_status = ENOMEM;
660 	return GSS_S_FAILURE;
661     }
662     q = p;
663 
664     /* copy up */
665 
666     for (i = 0; i < iov_count; i++) {
667 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
668 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
669 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
670 	{
671 	    memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
672 	    q += iov[i].buffer.length;
673 	}
674     }
675     assert((size_t)(q - p) == len);
676 
677     /* unrotate first part */
678     q = p + rrc;
679     skip = rrc;
680     for (i = 0; i < iov_count; i++) {
681 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
682 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
683 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
684 	{
685 	    if (iov[i].buffer.length <= skip) {
686 		skip -= iov[i].buffer.length;
687 	    } else {
688 		memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
689 		q += iov[i].buffer.length - skip;
690 		skip = 0;
691 	    }
692 	}
693     }
694     /* copy trailer */
695     q = p;
696     skip = rrc;
697     for (i = 0; i < iov_count; i++) {
698 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
699 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
700 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
701 	{
702 	    memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
703 	    if (iov[i].buffer.length > skip)
704 		break;
705 	    skip -= iov[i].buffer.length;
706 	    q += iov[i].buffer.length;
707 	}
708     }
    free(p);

    return GSS_S_COMPLETE;
710 }
711 
712 #if 0
713 
714 OM_uint32
715 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
716 		       gsskrb5_ctx ctx,
717 		       krb5_context context,
718 		       int *conf_state,
719 		       gss_qop_t *qop_state,
720 		       gss_iov_buffer_desc *iov,
721 		       int iov_count)
722 {
723     OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
724     gss_iov_buffer_desc *header, *trailer, *padding;
725     gss_cfx_wrap_token token, ttoken;
726     u_char token_flags;
727     krb5_error_code ret;
728     unsigned usage;
729     uint16_t ec, rrc;
730     krb5_crypto_iov *data = NULL;
731     int i, j;
732 
733     *minor_status = 0;
734 
735     header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
736     if (header == NULL) {
737 	*minor_status = EINVAL;
738 	return GSS_S_FAILURE;
739     }
740 
741     if (header->buffer.length < sizeof(*token)) /* we check exact below */
742 	return GSS_S_DEFECTIVE_TOKEN;
743 
744     padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
745     if (padding != NULL && padding->buffer.length != 0) {
746 	*minor_status = EINVAL;
747 	return GSS_S_FAILURE;
748     }
749 
750     trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
751 
752     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
753     if (major_status != GSS_S_COMPLETE) {
754 	    return major_status;
755     }
756 
757     token = (gss_cfx_wrap_token)header->buffer.value;
758 
759     if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
760 	return GSS_S_DEFECTIVE_TOKEN;
761 
762     /* Ignore unknown flags */
763     token_flags = token->Flags &
764 	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
765 
766     if (token_flags & CFXSentByAcceptor) {
767 	if ((ctx->more_flags & LOCAL) == 0)
768 	    return GSS_S_DEFECTIVE_TOKEN;
769     }
770 
771     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
772 	if ((token_flags & CFXAcceptorSubkey) == 0)
773 	    return GSS_S_DEFECTIVE_TOKEN;
774     } else {
775 	if (token_flags & CFXAcceptorSubkey)
776 	    return GSS_S_DEFECTIVE_TOKEN;
777     }
778 
779     if (token->Filler != 0xFF)
780 	return GSS_S_DEFECTIVE_TOKEN;
781 
782     if (conf_state != NULL)
783 	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
784 
785     ec  = (token->EC[0]  << 8) | token->EC[1];
786     rrc = (token->RRC[0] << 8) | token->RRC[1];
787 
788     /*
789      * Check sequence number
790      */
791     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
792     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
793     if (seq_number_hi) {
794 	/* no support for 64-bit sequence numbers */
795 	*minor_status = ERANGE;
796 	return GSS_S_UNSEQ_TOKEN;
797     }
798 
799     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
800     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
801     if (ret != 0) {
802 	*minor_status = 0;
803 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
804 	return ret;
805     }
806     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
807 
808     /*
809      * Decrypt and/or verify checksum
810      */
811 
812     if (ctx->more_flags & LOCAL) {
813 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
814     } else {
815 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
816     }
817 
818     data = calloc(iov_count + 3, sizeof(data[0]));
819     if (data == NULL) {
820 	*minor_status = ENOMEM;
821 	major_status = GSS_S_FAILURE;
822 	goto failure;
823     }
824 
825     if (token_flags & CFXSealed) {
826 	size_t k5tsize, k5hsize;
827 
828 	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
829 	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
830 
831 	/* Rotate by RRC; bogus to do this in-place XXX */
832 	/* Check RRC */
833 
834 	if (trailer == NULL) {
835 	    size_t gsstsize = k5tsize + sizeof(*token);
836 	    size_t gsshsize = k5hsize + sizeof(*token);
837 
838 	    if (rrc != gsstsize) {
839 		major_status = GSS_S_DEFECTIVE_TOKEN;
840 		goto failure;
841 	    }
842 
843 	    if (IS_DCE_STYLE(ctx))
844 		gsstsize += ec;
845 
846 	    gsshsize += gsstsize;
847 
848 	    if (header->buffer.length != gsshsize) {
849 		major_status = GSS_S_DEFECTIVE_TOKEN;
850 		goto failure;
851 	    }
852 	} else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
853 	    major_status = GSS_S_DEFECTIVE_TOKEN;
854 	    goto failure;
855 	} else if (header->buffer.length != sizeof(*token) + k5hsize) {
856 	    major_status = GSS_S_DEFECTIVE_TOKEN;
857 	    goto failure;
858 	} else if (rrc != 0) {
	    /* go through the slow path */
860 	    major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
861 	    if (major_status)
862 		goto failure;
863 	}
864 
865 	i = 0;
866 	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
867 	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
868 	data[i].data.length = k5hsize;
869 	i++;
870 
871 	for (j = 0; j < iov_count; i++, j++) {
872 	    switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
873 	    case GSS_IOV_BUFFER_TYPE_DATA:
874 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
875 		break;
876 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
877 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
878 		break;
879 	    default:
880 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
881 		break;
882 	    }
883 	    data[i].data.length = iov[j].buffer.length;
884 	    data[i].data.data = iov[j].buffer.value;
885 	}
886 
	/* The encrypted CFX header is in the trailer (or right after the
	   header if in DCE mode); point E"header" at it.
	*/
890 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
891 	if (trailer) {
892 	    data[i].data.data = trailer->buffer.value;
893 	} else {
894 	    data[i].data.data = ((uint8_t *)header->buffer.value) +
		header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
896 	}
897 
898 	data[i].data.length = ec + sizeof(*token);
899 	ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
900 	i++;
901 
902 	/* Kerberos trailer comes after the gss trailer */
903 	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
904 	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
905 	data[i].data.length = k5tsize;
906 	i++;
907 
908 	ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
909 	if (ret != 0) {
910 	    *minor_status = ret;
911 	    major_status = GSS_S_FAILURE;
912 	    goto failure;
913 	}
914 
915 	ttoken->RRC[0] = token->RRC[0];
916 	ttoken->RRC[1] = token->RRC[1];
917 
918 	/* Check the integrity of the header */
919 	if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
920 	    major_status = GSS_S_BAD_MIC;
921 	    goto failure;
922 	}
923     } else {
924 	size_t gsstsize = ec;
925 	size_t gsshsize = sizeof(*token);
926 
927 	if (trailer == NULL) {
928 	    /* Check RRC */
929 	    if (rrc != gsstsize) {
930 	       *minor_status = EINVAL;
931 	       major_status = GSS_S_FAILURE;
932 	       goto failure;
933 	    }
934 
935 	    gsshsize += gsstsize;
936 	    gsstsize = 0;
937 	} else if (trailer->buffer.length != gsstsize) {
938 	    major_status = GSS_S_DEFECTIVE_TOKEN;
939 	    goto failure;
940 	} else if (rrc != 0) {
941 	    /* Check RRC */
942 	    *minor_status = EINVAL;
943 	    major_status = GSS_S_FAILURE;
944 	    goto failure;
945 	}
946 
947 	if (header->buffer.length != gsshsize) {
948 	    major_status = GSS_S_DEFECTIVE_TOKEN;
949 	    goto failure;
950 	}
951 
952 	for (i = 0; i < iov_count; i++) {
953 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
954 	    case GSS_IOV_BUFFER_TYPE_DATA:
955 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
956 		break;
957 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
958 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
959 		break;
960 	    default:
961 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
962 		break;
963 	    }
964 	    data[i].data.length = iov[i].buffer.length;
965 	    data[i].data.data = iov[i].buffer.value;
966 	}
967 
968 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
969 	data[i].data.data = header->buffer.value;
970 	data[i].data.length = sizeof(*token);
971 	i++;
972 
973 	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
974 	if (trailer) {
975 		data[i].data.data = trailer->buffer.value;
976 	} else {
977 		data[i].data.data = (uint8_t *)header->buffer.value +
978 				     sizeof(*token);
979 	}
980 	data[i].data.length = ec;
981 	i++;
982 
983 	token = (gss_cfx_wrap_token)header->buffer.value;
984 	token->EC[0]  = 0;
985 	token->EC[1]  = 0;
986 	token->RRC[0] = 0;
987 	token->RRC[1] = 0;
988 
989 	ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
990 	if (ret) {
991 	    *minor_status = ret;
992 	    major_status = GSS_S_FAILURE;
993 	    goto failure;
994 	}
995     }
996 
997     if (qop_state != NULL) {
998 	*qop_state = GSS_C_QOP_DEFAULT;
999     }
1000 
1001     free(data);
1002 
1003     *minor_status = 0;
1004     return GSS_S_COMPLETE;
1005 
1006  failure:
1007     if (data)
1008 	free(data);
1009 
1010     gss_release_iov_buffer(&junk, iov, iov_count);
1011 
1012     return major_status;
1013 }
1014 #endif
1015 
1016 OM_uint32
1017 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
1018 			    gsskrb5_ctx ctx,
1019 			    krb5_context context,
1020 			    int conf_req_flag,
1021 			    gss_qop_t qop_req,
1022 			    int *conf_state,
1023 			    gss_iov_buffer_desc *iov,
1024 			    int iov_count)
1025 {
1026     OM_uint32 major_status;
1027     size_t size;
1028     int i;
1029     gss_iov_buffer_desc *header = NULL;
1030     gss_iov_buffer_desc *padding = NULL;
1031     gss_iov_buffer_desc *trailer = NULL;
1032     size_t gsshsize = 0;
1033     size_t gsstsize = 0;
1034     size_t k5hsize = 0;
1035     size_t k5tsize = 0;
1036 
1037     GSSAPI_KRB5_INIT (&context);
1038     *minor_status = 0;
1039 
1040     for (size = 0, i = 0; i < iov_count; i++) {
1041 	switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
1042 	case GSS_IOV_BUFFER_TYPE_EMPTY:
1043 	    break;
1044 	case GSS_IOV_BUFFER_TYPE_DATA:
1045 	    size += iov[i].buffer.length;
1046 	    break;
1047 	case GSS_IOV_BUFFER_TYPE_HEADER:
1048 	    if (header != NULL) {
1049 		*minor_status = 0;
1050 		return GSS_S_FAILURE;
1051 	    }
1052 	    header = &iov[i];
1053 	    break;
1054 	case GSS_IOV_BUFFER_TYPE_TRAILER:
1055 	    if (trailer != NULL) {
1056 		*minor_status = 0;
1057 		return GSS_S_FAILURE;
1058 	    }
1059 	    trailer = &iov[i];
1060 	    break;
1061 	case GSS_IOV_BUFFER_TYPE_PADDING:
1062 	    if (padding != NULL) {
1063 		*minor_status = 0;
1064 		return GSS_S_FAILURE;
1065 	    }
1066 	    padding = &iov[i];
1067 	    break;
1068 	case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
1069 	    break;
1070 	default:
1071 	    *minor_status = EINVAL;
1072 	    return GSS_S_FAILURE;
1073 	}
1074     }
1075 
1076     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
1077     if (major_status != GSS_S_COMPLETE) {
1078 	    return major_status;
1079     }
1080 
1081     if (conf_req_flag) {
1082 	size_t k5psize = 0;
1083 	size_t k5pbase = 0;
1084 	size_t k5bsize = 0;
1085 	size_t ec = 0;
1086 
1087 	size += sizeof(gss_cfx_wrap_token_desc);
1088 
1089 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1090 					   KRB5_CRYPTO_TYPE_HEADER,
1091 					   &k5hsize);
1092 	if (*minor_status)
1093 	    return GSS_S_FAILURE;
1094 
1095 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1096 					   KRB5_CRYPTO_TYPE_TRAILER,
1097 					   &k5tsize);
1098 	if (*minor_status)
1099 	    return GSS_S_FAILURE;
1100 
1101 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1102 					   KRB5_CRYPTO_TYPE_PADDING,
1103 					   &k5pbase);
1104 	if (*minor_status)
1105 	    return GSS_S_FAILURE;
1106 
1107 	if (k5pbase > 1) {
1108 	    k5psize = k5pbase - (size % k5pbase);
1109 	} else {
1110 	    k5psize = 0;
1111 	}
1112 
1113 	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
1114 	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
1115 						     &k5bsize);
1116 	    if (*minor_status)
1117 		return GSS_S_FAILURE;
1118 
1119 	    ec = k5bsize;
1120 	} else {
1121 	    ec = k5psize;
1122 	}
1123 
1124 	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
1125 	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
1126     } else {
1127 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1128 					   KRB5_CRYPTO_TYPE_CHECKSUM,
1129 					   &k5tsize);
1130 	if (*minor_status)
1131 	    return GSS_S_FAILURE;
1132 
1133 	gsshsize = sizeof(gss_cfx_wrap_token_desc);
1134 	gsstsize = k5tsize;
1135     }
1136 
1137     if (trailer != NULL) {
1138 	trailer->buffer.length = gsstsize;
1139     } else {
1140 	gsshsize += gsstsize;
1141     }
1142 
1143     header->buffer.length = gsshsize;
1144 
1145     if (padding) {
1146 	/* padding is done via EC and is contained in the header or trailer */
1147 	padding->buffer.length = 0;
1148     }
1149 
1150     if (conf_state) {
1151 	*conf_state = conf_req_flag;
1152     }
1153 
1154     return GSS_S_COMPLETE;
1155 }
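
#if 0
/*
 * Illustrative sketch only (not compiled), showing how a caller might
 * use _gssapi_wrap_iov_length_cfx() above to learn the required HEADER
 * buffer size before wrapping.  It assumes a non-DCE-style context (so
 * a PADDING buffer is required) and a single DATA buffer; no trailer is
 * supplied, so the trailer space is folded into the header.
 */
static OM_uint32
example_wrap_iov_lengths(OM_uint32 *minor, gsskrb5_ctx ctx,
			 krb5_context context, size_t datalen,
			 size_t *headerlen)
{
    gss_iov_buffer_desc iov[3];
    OM_uint32 major;
    int conf;

    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
    iov[0].buffer.value = NULL;
    iov[0].buffer.length = 0;

    iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[1].buffer.value = NULL;
    iov[1].buffer.length = datalen;

    iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
    iov[2].buffer.value = NULL;
    iov[2].buffer.length = 0;

    major = _gssapi_wrap_iov_length_cfx(minor, ctx, context,
					1 /* conf_req_flag */,
					GSS_C_QOP_DEFAULT, &conf, iov, 3);
    if (major != GSS_S_COMPLETE)
	return major;

    /* padding stays 0; the EC padding lives inside the (folded) header */
    *headerlen = iov[0].buffer.length;
    return GSS_S_COMPLETE;
}
#endif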
1156 
1157 
1158 
1159 
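/*
 * Non-IOV wrap.  With confidentiality the output token is
 * {"header" | rotate(encrypt(data | pad | "header"), RRC)}; without
 * confidentiality it is
 * {"header" | rotate(data | checksum(data | "header"), RRC)}.
 */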
1160 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
1161 			   const gsskrb5_ctx ctx,
1162 			   krb5_context context,
1163 			   int conf_req_flag,
1164 			   const gss_buffer_t input_message_buffer,
1165 			   int *conf_state,
1166 			   gss_buffer_t output_message_buffer)
1167 {
1168     gss_cfx_wrap_token token;
1169     krb5_error_code ret;
1170     unsigned usage;
1171     krb5_data cipher;
1172     size_t wrapped_len, cksumsize;
1173     uint16_t padlength, rrc = 0;
1174     int32_t seq_number;
1175     u_char *p;
1176 
1177     ret = _gsskrb5cfx_wrap_length_cfx(context,
1178 				      ctx->crypto, conf_req_flag,
1179 				      IS_DCE_STYLE(ctx),
1180 				      input_message_buffer->length,
1181 				      &wrapped_len, &cksumsize, &padlength);
1182     if (ret != 0) {
1183 	*minor_status = ret;
1184 	return GSS_S_FAILURE;
1185     }
1186 
1187     /* Always rotate encrypted token (if any) and checksum to header */
1188     rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
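
    /*
     * The receiver undoes this rotation in _gssapi_unwrap_cfx() before
     * decrypting or verifying the checksum, so the value chosen here
     * only has to match what is stored in the RRC field below.
     */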
1189 
1190     output_message_buffer->length = wrapped_len;
1191     output_message_buffer->value = malloc(output_message_buffer->length);
1192     if (output_message_buffer->value == NULL) {
1193 	*minor_status = ENOMEM;
1194 	return GSS_S_FAILURE;
1195     }
1196 
1197     p = output_message_buffer->value;
1198     token = (gss_cfx_wrap_token)p;
1199     token->TOK_ID[0] = 0x05;
1200     token->TOK_ID[1] = 0x04;
1201     token->Flags     = 0;
1202     token->Filler    = 0xFF;
1203     if ((ctx->more_flags & LOCAL) == 0)
1204 	token->Flags |= CFXSentByAcceptor;
1205     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1206 	token->Flags |= CFXAcceptorSubkey;
1207     if (conf_req_flag) {
1208 	/*
1209 	 * In Wrap tokens with confidentiality, the EC field is
1210 	 * used to encode the size (in bytes) of the random filler.
1211 	 */
1212 	token->Flags |= CFXSealed;
1213 	token->EC[0] = (padlength >> 8) & 0xFF;
1214 	token->EC[1] = (padlength >> 0) & 0xFF;
1215     } else {
1216 	/*
1217 	 * In Wrap tokens without confidentiality, the EC field is
1218 	 * used to encode the size (in bytes) of the trailing
1219 	 * checksum.
1220 	 *
	 * This is not used in the checksum calculation itself,
1222 	 * because the checksum length could potentially vary
1223 	 * depending on the data length.
1224 	 */
1225 	token->EC[0] = 0;
1226 	token->EC[1] = 0;
1227     }
1228 
1229     /*
1230      * In Wrap tokens that provide for confidentiality, the RRC
1231      * field in the header contains the hex value 00 00 before
1232      * encryption.
1233      *
1234      * In Wrap tokens that do not provide for confidentiality,
1235      * both the EC and RRC fields in the appended checksum
1236      * contain the hex value 00 00 for the purpose of calculating
1237      * the checksum.
1238      */
1239     token->RRC[0] = 0;
1240     token->RRC[1] = 0;
1241 
1242     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1243     krb5_auth_con_getlocalseqnumber(context,
1244 				    ctx->auth_context,
1245 				    &seq_number);
1246     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1247     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1248     krb5_auth_con_setlocalseqnumber(context,
1249 				    ctx->auth_context,
1250 				    ++seq_number);
1251     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1252 
1253     /*
1254      * If confidentiality is requested, the token header is
1255      * appended to the plaintext before encryption; the resulting
1256      * token is {"header" | encrypt(plaintext | pad | "header")}.
1257      *
1258      * If no confidentiality is requested, the checksum is
1259      * calculated over the plaintext concatenated with the
1260      * token header.
1261      */
1262     if (ctx->more_flags & LOCAL) {
1263 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1264     } else {
1265 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1266     }
1267 
1268     if (conf_req_flag) {
1269 	/*
1270 	 * Any necessary padding is added here to ensure that the
1271 	 * encrypted token header is always at the end of the
1272 	 * ciphertext.
1273 	 *
1274 	 * The specification does not require that the padding
1275 	 * bytes are initialized.
1276 	 */
1277 	p += sizeof(*token);
1278 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1279 	memset(p + input_message_buffer->length, 0xFF, padlength);
1280 	memcpy(p + input_message_buffer->length + padlength,
1281 	       token, sizeof(*token));
1282 
1283 	ret = krb5_encrypt(context, ctx->crypto,
1284 			   usage, p,
1285 			   input_message_buffer->length + padlength +
1286 				sizeof(*token),
1287 			   &cipher);
1288 	if (ret != 0) {
1289 	    *minor_status = ret;
1290 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1291 	    return GSS_S_FAILURE;
1292 	}
1293 	assert(sizeof(*token) + cipher.length == wrapped_len);
1294 	token->RRC[0] = (rrc >> 8) & 0xFF;
1295 	token->RRC[1] = (rrc >> 0) & 0xFF;
1296 
1297 	/*
	 * This is really ugly, but it is needed for interoperability with
	 * Windows DCERPC, since Windows rotates by EC+RRC.
1300 	 */
1301 	if (IS_DCE_STYLE(ctx)) {
1302 		ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1303 	} else {
1304 		ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1305 	}
1306 	if (ret != 0) {
1307 	    *minor_status = ret;
1308 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1309 	    return GSS_S_FAILURE;
1310 	}
1311 	memcpy(p, cipher.data, cipher.length);
1312 	krb5_data_free(&cipher);
1313     } else {
1314 	char *buf;
1315 	Checksum cksum;
1316 
1317 	buf = malloc(input_message_buffer->length + sizeof(*token));
1318 	if (buf == NULL) {
1319 	    *minor_status = ENOMEM;
1320 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1321 	    return GSS_S_FAILURE;
1322 	}
1323 	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1324 	memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1325 
1326 	ret = krb5_create_checksum(context, ctx->crypto,
1327 				   usage, 0, buf,
1328 				   input_message_buffer->length +
1329 					sizeof(*token),
1330 				   &cksum);
1331 	if (ret != 0) {
1332 	    *minor_status = ret;
1333 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1334 	    free(buf);
1335 	    return GSS_S_FAILURE;
1336 	}
1337 
1338 	free(buf);
1339 
1340 	assert(cksum.checksum.length == cksumsize);
1341 	token->EC[0] =  (cksum.checksum.length >> 8) & 0xFF;
1342 	token->EC[1] =  (cksum.checksum.length >> 0) & 0xFF;
1343 	token->RRC[0] = (rrc >> 8) & 0xFF;
1344 	token->RRC[1] = (rrc >> 0) & 0xFF;
1345 
1346 	p += sizeof(*token);
1347 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1348 	memcpy(p + input_message_buffer->length,
1349 	       cksum.checksum.data, cksum.checksum.length);
1350 
1351 	ret = rrc_rotate(p,
1352 	    input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1353 	if (ret != 0) {
1354 	    *minor_status = ret;
1355 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1356 	    free_Checksum(&cksum);
1357 	    return GSS_S_FAILURE;
1358 	}
1359 	free_Checksum(&cksum);
1360     }
1361 
1362     if (conf_state != NULL) {
1363 	*conf_state = conf_req_flag;
1364     }
1365 
1366     *minor_status = 0;
1367     return GSS_S_COMPLETE;
1368 }
1369 
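/*
 * Non-IOV unwrap: parse and validate the token header, undo the RRC
 * rotation, then either decrypt and compare the embedded copy of the
 * header (sealed case) or verify the checksum over (data | "header").
 */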
1370 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1371 			     const gsskrb5_ctx ctx,
1372 			     krb5_context context,
1373 			     const gss_buffer_t input_message_buffer,
1374 			     gss_buffer_t output_message_buffer,
1375 			     int *conf_state,
1376 			     gss_qop_t *qop_state)
1377 {
1378     gss_cfx_wrap_token token;
1379     u_char token_flags;
1380     krb5_error_code ret;
1381     unsigned usage;
1382     krb5_data data;
1383     uint16_t ec, rrc;
1384     OM_uint32 seq_number_lo, seq_number_hi;
1385     size_t len;
1386     u_char *p;
1387 
1388     *minor_status = 0;
1389 
1390     if (input_message_buffer->length < sizeof(*token)) {
1391 	return GSS_S_DEFECTIVE_TOKEN;
1392     }
1393 
1394     p = input_message_buffer->value;
1395 
1396     token = (gss_cfx_wrap_token)p;
1397 
1398     if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1399 	return GSS_S_DEFECTIVE_TOKEN;
1400     }
1401 
1402     /* Ignore unknown flags */
1403     token_flags = token->Flags &
1404 	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
1405 
1406     if (token_flags & CFXSentByAcceptor) {
1407 	if ((ctx->more_flags & LOCAL) == 0)
1408 	    return GSS_S_DEFECTIVE_TOKEN;
1409     }
1410 
1411     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1412 	if ((token_flags & CFXAcceptorSubkey) == 0)
1413 	    return GSS_S_DEFECTIVE_TOKEN;
1414     } else {
1415 	if (token_flags & CFXAcceptorSubkey)
1416 	    return GSS_S_DEFECTIVE_TOKEN;
1417     }
1418 
1419     if (token->Filler != 0xFF) {
1420 	return GSS_S_DEFECTIVE_TOKEN;
1421     }
1422 
1423     if (conf_state != NULL) {
1424 	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
1425     }
1426 
1427     ec  = (token->EC[0]  << 8) | token->EC[1];
1428     rrc = (token->RRC[0] << 8) | token->RRC[1];
1429 
1430     /*
1431      * Check sequence number
1432      */
1433     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1434     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1435     if (seq_number_hi) {
1436 	/* no support for 64-bit sequence numbers */
1437 	*minor_status = ERANGE;
1438 	return GSS_S_UNSEQ_TOKEN;
1439     }
1440 
1441     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1442     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1443     if (ret != 0) {
1444 	*minor_status = 0;
1445 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1446 	_gsskrb5_release_buffer(minor_status, output_message_buffer);
1447 	return ret;
1448     }
1449     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1450 
1451     /*
1452      * Decrypt and/or verify checksum
1453      */
1454 
1455     if (ctx->more_flags & LOCAL) {
1456 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1457     } else {
1458 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1459     }
1460 
1461     p += sizeof(*token);
1462     len = input_message_buffer->length;
1463     len -= (p - (u_char *)input_message_buffer->value);
1464 
1465     if (token_flags & CFXSealed) {
1466 	/*
	 * This is really ugly, but it is needed for interoperability with
	 * Windows DCERPC, since Windows rotates by EC+RRC.
1469 	 */
1470 	if (IS_DCE_STYLE(ctx)) {
1471 		*minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1472 	} else {
1473 		*minor_status = rrc_rotate(p, len, rrc, TRUE);
1474 	}
1475 	if (*minor_status != 0) {
1476 	    return GSS_S_FAILURE;
1477 	}
1478 
1479 	ret = krb5_decrypt(context, ctx->crypto, usage,
1480 	    p, len, &data);
1481 	if (ret != 0) {
1482 	    *minor_status = ret;
1483 	    return GSS_S_BAD_MIC;
1484 	}
1485 
1486 	/* Check that there is room for the pad and token header */
1487 	if (data.length < ec + sizeof(*token)) {
1488 	    krb5_data_free(&data);
1489 	    return GSS_S_DEFECTIVE_TOKEN;
1490 	}
1491 	p = data.data;
1492 	p += data.length - sizeof(*token);
1493 
1494 	/* RRC is unprotected; don't modify input buffer */
1495 	((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1496 	((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1497 
1498 	/* Check the integrity of the header */
1499 	if (ct_memcmp(p, token, sizeof(*token)) != 0) {
1500 	    krb5_data_free(&data);
1501 	    return GSS_S_BAD_MIC;
1502 	}
1503 
1504 	output_message_buffer->value = data.data;
1505 	output_message_buffer->length = data.length - ec - sizeof(*token);
1506     } else {
1507 	Checksum cksum;
1508 
1509 	/* Rotate by RRC; bogus to do this in-place XXX */
1510 	*minor_status = rrc_rotate(p, len, rrc, TRUE);
1511 	if (*minor_status != 0) {
1512 	    return GSS_S_FAILURE;
1513 	}
1514 
1515 	/* Determine checksum type */
1516 	ret = krb5_crypto_get_checksum_type(context,
1517 					    ctx->crypto,
1518 					    &cksum.cksumtype);
1519 	if (ret != 0) {
1520 	    *minor_status = ret;
1521 	    return GSS_S_FAILURE;
1522 	}
1523 
1524 	cksum.checksum.length = ec;
1525 
1526 	/* Check we have at least as much data as the checksum */
1527 	if (len < cksum.checksum.length) {
1528 	    *minor_status = ERANGE;
1529 	    return GSS_S_BAD_MIC;
1530 	}
1531 
1532 	/* Length now is of the plaintext only, no checksum */
1533 	len -= cksum.checksum.length;
1534 	cksum.checksum.data = p + len;
1535 
1536 	output_message_buffer->length = len; /* for later */
1537 	output_message_buffer->value = malloc(len + sizeof(*token));
1538 	if (output_message_buffer->value == NULL) {
1539 	    *minor_status = ENOMEM;
1540 	    return GSS_S_FAILURE;
1541 	}
1542 
1543 	/* Checksum is over (plaintext-data | "header") */
1544 	memcpy(output_message_buffer->value, p, len);
1545 	memcpy((u_char *)output_message_buffer->value + len,
1546 	       token, sizeof(*token));
1547 
1548 	/* EC is not included in checksum calculation */
1549 	token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1550 				     len);
1551 	token->EC[0]  = 0;
1552 	token->EC[1]  = 0;
1553 	token->RRC[0] = 0;
1554 	token->RRC[1] = 0;
1555 
1556 	ret = krb5_verify_checksum(context, ctx->crypto,
1557 				   usage,
1558 				   output_message_buffer->value,
1559 				   len + sizeof(*token),
1560 				   &cksum);
1561 	if (ret != 0) {
1562 	    *minor_status = ret;
1563 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1564 	    return GSS_S_BAD_MIC;
1565 	}
1566     }
1567 
1568     if (qop_state != NULL) {
1569 	*qop_state = GSS_C_QOP_DEFAULT;
1570     }
1571 
1572     *minor_status = 0;
1573     return GSS_S_COMPLETE;
1574 }
1575 
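/*
 * MIC tokens (RFC 4121, section 4.2.6.1) consist of a 16-byte header
 * {TOK_ID 0x04 0x04 | Flags | five 0xFF filler octets | 64-bit SND_SEQ}
 * followed by the checksum, which is computed over the message
 * concatenated with that header.
 */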
1576 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1577 			  const gsskrb5_ctx ctx,
1578 			  krb5_context context,
1579 			  gss_qop_t qop_req,
1580 			  const gss_buffer_t message_buffer,
1581 			  gss_buffer_t message_token)
1582 {
1583     gss_cfx_mic_token token;
1584     krb5_error_code ret;
1585     unsigned usage;
1586     Checksum cksum;
1587     u_char *buf;
1588     size_t len;
1589     int32_t seq_number;
1590 
1591     len = message_buffer->length + sizeof(*token);
1592     buf = malloc(len);
1593     if (buf == NULL) {
1594 	*minor_status = ENOMEM;
1595 	return GSS_S_FAILURE;
1596     }
1597 
1598     memcpy(buf, message_buffer->value, message_buffer->length);
1599 
1600     token = (gss_cfx_mic_token)(buf + message_buffer->length);
1601     token->TOK_ID[0] = 0x04;
1602     token->TOK_ID[1] = 0x04;
1603     token->Flags = 0;
1604     if ((ctx->more_flags & LOCAL) == 0)
1605 	token->Flags |= CFXSentByAcceptor;
1606     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1607 	token->Flags |= CFXAcceptorSubkey;
1608     memset(token->Filler, 0xFF, 5);
1609 
1610     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1611     krb5_auth_con_getlocalseqnumber(context,
1612 				    ctx->auth_context,
1613 				    &seq_number);
1614     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1615     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1616     krb5_auth_con_setlocalseqnumber(context,
1617 				    ctx->auth_context,
1618 				    ++seq_number);
1619     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1620 
1621     if (ctx->more_flags & LOCAL) {
1622 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1623     } else {
1624 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1625     }
1626 
1627     ret = krb5_create_checksum(context, ctx->crypto,
1628 	usage, 0, buf, len, &cksum);
1629     if (ret != 0) {
1630 	*minor_status = ret;
1631 	free(buf);
1632 	return GSS_S_FAILURE;
1633     }
1634 
1635     /* Determine MIC length */
1636     message_token->length = sizeof(*token) + cksum.checksum.length;
1637     message_token->value = malloc(message_token->length);
1638     if (message_token->value == NULL) {
1639 	*minor_status = ENOMEM;
1640 	free_Checksum(&cksum);
1641 	free(buf);
1642 	return GSS_S_FAILURE;
1643     }
1644 
    /* Token is { "header" | get_mic(plaintext-data | "header") } */
1646     memcpy(message_token->value, token, sizeof(*token));
1647     memcpy((u_char *)message_token->value + sizeof(*token),
1648 	   cksum.checksum.data, cksum.checksum.length);
1649 
1650     free_Checksum(&cksum);
1651     free(buf);
1652 
1653     *minor_status = 0;
1654     return GSS_S_COMPLETE;
1655 }
1656 
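/*
 * Verify a MIC token produced by the routine above: check the header
 * fields and the sequence number, then verify the checksum over
 * (message | "header").
 */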
1657 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1658 				 const gsskrb5_ctx ctx,
1659 				 krb5_context context,
1660 				 const gss_buffer_t message_buffer,
1661 				 const gss_buffer_t token_buffer,
1662 				 gss_qop_t *qop_state)
1663 {
1664     gss_cfx_mic_token token;
1665     u_char token_flags;
1666     krb5_error_code ret;
1667     unsigned usage;
1668     OM_uint32 seq_number_lo, seq_number_hi;
1669     u_char *buf, *p;
1670     Checksum cksum;
1671 
1672     *minor_status = 0;
1673 
1674     if (token_buffer->length < sizeof(*token)) {
1675 	return GSS_S_DEFECTIVE_TOKEN;
1676     }
1677 
1678     p = token_buffer->value;
1679 
1680     token = (gss_cfx_mic_token)p;
1681 
1682     if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1683 	return GSS_S_DEFECTIVE_TOKEN;
1684     }
1685 
1686     /* Ignore unknown flags */
1687     token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
1688 
1689     if (token_flags & CFXSentByAcceptor) {
1690 	if ((ctx->more_flags & LOCAL) == 0)
1691 	    return GSS_S_DEFECTIVE_TOKEN;
1692     }
1693     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1694 	if ((token_flags & CFXAcceptorSubkey) == 0)
1695 	    return GSS_S_DEFECTIVE_TOKEN;
1696     } else {
1697 	if (token_flags & CFXAcceptorSubkey)
1698 	    return GSS_S_DEFECTIVE_TOKEN;
1699     }
1700 
1701     if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1702 	return GSS_S_DEFECTIVE_TOKEN;
1703     }
1704 
1705     /*
1706      * Check sequence number
1707      */
1708     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1709     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1710     if (seq_number_hi) {
1711 	*minor_status = ERANGE;
1712 	return GSS_S_UNSEQ_TOKEN;
1713     }
1714 
1715     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1716     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1717     if (ret != 0) {
1718 	*minor_status = 0;
1719 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1720 	return ret;
1721     }
1722     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1723 
1724     /*
1725      * Verify checksum
1726      */
1727     ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1728 					&cksum.cksumtype);
1729     if (ret != 0) {
1730 	*minor_status = ret;
1731 	return GSS_S_FAILURE;
1732     }
1733 
1734     cksum.checksum.data = p + sizeof(*token);
1735     cksum.checksum.length = token_buffer->length - sizeof(*token);
1736 
1737     if (ctx->more_flags & LOCAL) {
1738 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1739     } else {
1740 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1741     }
1742 
1743     buf = malloc(message_buffer->length + sizeof(*token));
1744     if (buf == NULL) {
1745 	*minor_status = ENOMEM;
1746 	return GSS_S_FAILURE;
1747     }
1748     memcpy(buf, message_buffer->value, message_buffer->length);
1749     memcpy(buf + message_buffer->length, token, sizeof(*token));
1750 
1751     ret = krb5_verify_checksum(context, ctx->crypto,
1752 			       usage,
1753 			       buf,
1754 			       sizeof(*token) + message_buffer->length,
1755 			       &cksum);
1756     if (ret != 0) {
1757 	*minor_status = ret;
1758 	free(buf);
1759 	return GSS_S_BAD_MIC;
1760     }
1761 
1762     free(buf);
1763 
1764     if (qop_state != NULL) {
1765 	*qop_state = GSS_C_QOP_DEFAULT;
1766     }
1767 
1768     return GSS_S_COMPLETE;
1769 }
1770