/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Copyright (C) 2011-2018 PADL Software Pty Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "gssapiP_spnego.h"
#include <generic/gssapiP_generic.h>
#include "k5-input.h"

static void
release_auth_mech(struct negoex_auth_mech *mech);

OM_uint32
negoex_random(OM_uint32 *minor, spnego_gss_ctx_id_t ctx,
              uint8_t *data, size_t length)
{
    krb5_data d = make_data(data, length);

    *minor = krb5_c_random_make_octets(ctx->kctx, &d);
    return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
}

/*
 * SPNEGO functions expect to find the active mech context in ctx->ctx_handle,
 * but the metadata exchange APIs force us to have one mech context per mech
 * entry.  To address this mismatch, move the active mech context (if we have
 * one) to ctx->ctx_handle at the end of NegoEx processing.
 */
void
negoex_prep_context_for_spnego(spnego_gss_ctx_id_t ctx)
{
    struct negoex_auth_mech *mech;

    mech = K5_TAILQ_FIRST(&ctx->negoex_mechs);
    if (mech == NULL || mech->mech_context == GSS_C_NO_CONTEXT)
        return;

    assert(ctx->ctx_handle == GSS_C_NO_CONTEXT);
    ctx->ctx_handle = mech->mech_context;
    mech->mech_context = GSS_C_NO_CONTEXT;
}

OM_uint32
negoex_prep_context_for_negoex(OM_uint32 *minor, spnego_gss_ctx_id_t ctx)
{
    krb5_error_code ret;
    struct negoex_auth_mech *mech;

    if (ctx->kctx != NULL) {
        /* The context is already initialized for NegoEx.  Undo what
         * negoex_prep_context_for_spnego() did, if applicable. */
        if (ctx->ctx_handle != GSS_C_NO_CONTEXT) {
            mech = K5_TAILQ_FIRST(&ctx->negoex_mechs);
            assert(mech != NULL && mech->mech_context == GSS_C_NO_CONTEXT);
            mech->mech_context = ctx->ctx_handle;
            ctx->ctx_handle = GSS_C_NO_CONTEXT;
        }
        return GSS_S_COMPLETE;
    }

    /* Initialize the NegoEx context fields.  (negoex_mechs is already set up
     * by SPNEGO.) */
    ret = krb5_init_context(&ctx->kctx);
    if (ret) {
        *minor = ret;
        return GSS_S_FAILURE;
    }

    k5_buf_init_dynamic(&ctx->negoex_transcript);

    return GSS_S_COMPLETE;
}
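/*
 * Illustrative lifecycle (a sketch of the assumed caller behavior, not code
 * from this file): on each SPNEGO pass that dispatches to NegoEx, the caller
 * is expected to invoke negoex_prep_context_for_negoex() before generating
 * or processing NegoEx messages and negoex_prep_context_for_spnego()
 * afterwards, so that outside NegoEx processing the active mech context
 * lives in ctx->ctx_handle, and inside NegoEx processing it lives in the
 * first entry of ctx->negoex_mechs.
 */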
static void
release_all_mechs(spnego_gss_ctx_id_t ctx)
{
    struct negoex_auth_mech *mech, *next;

    K5_TAILQ_FOREACH_SAFE(mech, &ctx->negoex_mechs, links, next)
        release_auth_mech(mech);
    K5_TAILQ_INIT(&ctx->negoex_mechs);
}

void
negoex_release_context(spnego_gss_ctx_id_t ctx)
{
    k5_buf_free(&ctx->negoex_transcript);
    release_all_mechs(ctx);
    krb5_free_context(ctx->kctx);
    ctx->kctx = NULL;
}

static const char *
typestr(enum message_type type)
{
    if (type == INITIATOR_NEGO)
        return "INITIATOR_NEGO";
    else if (type == ACCEPTOR_NEGO)
        return "ACCEPTOR_NEGO";
    else if (type == INITIATOR_META_DATA)
        return "INITIATOR_META_DATA";
    else if (type == ACCEPTOR_META_DATA)
        return "ACCEPTOR_META_DATA";
    else if (type == CHALLENGE)
        return "CHALLENGE";
    else if (type == AP_REQUEST)
        return "AP_REQUEST";
    else if (type == VERIFY)
        return "VERIFY";
    else if (type == ALERT)
        return "ALERT";
    else
        return "UNKNOWN";
}

static void
add_guid(struct k5buf *buf, const uint8_t guid[GUID_LENGTH])
{
    uint32_t data1 = load_32_le(guid);
    uint16_t data2 = load_16_le(guid + 4), data3 = load_16_le(guid + 6);

    k5_buf_add_fmt(buf, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                   data1, data2, data3, guid[8], guid[9], guid[10], guid[11],
                   guid[12], guid[13], guid[14], guid[15]);
}

static char *
guid_to_string(const uint8_t guid[GUID_LENGTH])
{
    struct k5buf buf;

    k5_buf_init_dynamic(&buf);
    add_guid(&buf, guid);
    return k5_buf_cstring(&buf);
}

/* Check that the described vector lies within the message, and return a
 * pointer to its first element. */
static inline const uint8_t *
vector_base(size_t offset, size_t count, size_t width,
            const uint8_t *msg_base, size_t msg_len)
{
    if (offset > msg_len || count > (msg_len - offset) / width)
        return NULL;
    return msg_base + offset;
}

/* Trace a received message.  Call after the context sequence number is
 * incremented. */
static void
trace_received_message(spnego_gss_ctx_id_t ctx,
                       const struct negoex_message *msg)
{
    struct k5buf buf;
    uint16_t i;
    char *info = NULL;

    if (msg->type == INITIATOR_NEGO || msg->type == ACCEPTOR_NEGO) {
        k5_buf_init_dynamic(&buf);
        for (i = 0; i < msg->u.n.nschemes; i++) {
            add_guid(&buf, msg->u.n.schemes + i * GUID_LENGTH);
            if (i + 1 < msg->u.n.nschemes)
                k5_buf_add(&buf, " ");
        }
        info = k5_buf_cstring(&buf);
    } else if (msg->type == INITIATOR_META_DATA ||
               msg->type == ACCEPTOR_META_DATA ||
               msg->type == CHALLENGE || msg->type == AP_REQUEST) {
        info = guid_to_string(msg->u.e.scheme);
    } else if (msg->type == VERIFY) {
        info = guid_to_string(msg->u.v.scheme);
    } else if (msg->type == ALERT) {
        info = guid_to_string(msg->u.a.scheme);
    }

    if (info == NULL)
        return;

    TRACE_NEGOEX_INCOMING(ctx->kctx, ctx->negoex_seqnum - 1,
                          typestr(msg->type), info);
    free(info);
}

/* Trace an outgoing message with a GUID info string.  Call after the context
 * sequence number is incremented. */
static void
trace_outgoing_message(spnego_gss_ctx_id_t ctx, enum message_type type,
                       const uint8_t guid[GUID_LENGTH])
{
    char *info = guid_to_string(guid);

    if (info == NULL)
        return;
    TRACE_NEGOEX_OUTGOING(ctx->kctx, ctx->negoex_seqnum - 1, typestr(type),
                          info);
    free(info);
}
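/*
 * Message parsing.  NegoEx encodes variable-length fields as (offset, count)
 * vector descriptors, where the offset is relative to the start of the
 * containing message; vector_base() above validates such a descriptor
 * against the message bounds.  Worked example (illustrative numbers only):
 * for msg_len = 200, offset = 96, count = 2, width = GUID_LENGTH (16),
 * offset <= msg_len and count <= (200 - 96) / 16 = 6, so the vector is in
 * bounds and its first element is at msg_base + 96.
 */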
static OM_uint32
parse_nego_message(OM_uint32 *minor, struct k5input *in,
                   const uint8_t *msg_base, size_t msg_len,
                   struct nego_message *msg)
{
    const uint8_t *p;
    uint64_t protocol_version;
    uint32_t extension_type;
    size_t offset, count, i;

    p = k5_input_get_bytes(in, sizeof(msg->random));
    if (p != NULL)
        memcpy(msg->random, p, sizeof(msg->random));
    protocol_version = k5_input_get_uint64_le(in);
    if (protocol_version != 0) {
        *minor = ERR_NEGOEX_UNSUPPORTED_VERSION;
        return GSS_S_UNAVAILABLE;
    }

    offset = k5_input_get_uint32_le(in);
    count = k5_input_get_uint16_le(in);
    msg->schemes = vector_base(offset, count, GUID_LENGTH, msg_base, msg_len);
    msg->nschemes = count;
    if (msg->schemes == NULL) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    offset = k5_input_get_uint32_le(in);
    count = k5_input_get_uint16_le(in);
    p = vector_base(offset, count, EXTENSION_LENGTH, msg_base, msg_len);
    if (p == NULL && count > 0) {
        /* Reject a non-empty extensions vector that lies outside the message
         * rather than dereferencing a null pointer below. */
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    for (i = 0; i < count; i++) {
        extension_type = load_32_le(p + i * EXTENSION_LENGTH);
        if (extension_type & EXTENSION_FLAG_CRITICAL) {
            *minor = ERR_NEGOEX_UNSUPPORTED_CRITICAL_EXTENSION;
            return GSS_S_UNAVAILABLE;
        }
    }

    return GSS_S_COMPLETE;
}

static OM_uint32
parse_exchange_message(OM_uint32 *minor, struct k5input *in,
                       const uint8_t *msg_base, size_t msg_len,
                       struct exchange_message *msg)
{
    const uint8_t *p;
    size_t offset, len;

    p = k5_input_get_bytes(in, GUID_LENGTH);
    if (p != NULL)
        memcpy(msg->scheme, p, GUID_LENGTH);

    offset = k5_input_get_uint32_le(in);
    len = k5_input_get_uint32_le(in);
    p = vector_base(offset, len, 1, msg_base, msg_len);
    if (p == NULL) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    msg->token.value = (void *)p;
    msg->token.length = len;

    return GSS_S_COMPLETE;
}
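/*
 * INITIATOR_META_DATA, ACCEPTOR_META_DATA, CHALLENGE, and AP_REQUEST all use
 * the exchange-message layout parsed above: a 16-byte AuthScheme GUID
 * followed by a byte-vector descriptor (offset and length within the
 * message) locating the embedded mechanism token.  The parsed token aliases
 * the input message; it is not copied.
 */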
static OM_uint32
parse_verify_message(OM_uint32 *minor, struct k5input *in,
                     const uint8_t *msg_base, size_t msg_len,
                     size_t token_offset, struct verify_message *msg)
{
    const uint8_t *p;
    size_t offset, len;
    uint32_t hdrlen, cksum_scheme;

    p = k5_input_get_bytes(in, GUID_LENGTH);
    if (p != NULL)
        memcpy(msg->scheme, p, GUID_LENGTH);

    hdrlen = k5_input_get_uint32_le(in);
    if (hdrlen != CHECKSUM_HEADER_LENGTH) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    cksum_scheme = k5_input_get_uint32_le(in);
    if (cksum_scheme != CHECKSUM_SCHEME_RFC3961) {
        *minor = ERR_NEGOEX_UNKNOWN_CHECKSUM_SCHEME;
        return GSS_S_UNAVAILABLE;
    }
    msg->cksum_type = k5_input_get_uint32_le(in);

    offset = k5_input_get_uint32_le(in);
    len = k5_input_get_uint32_le(in);
    msg->cksum = vector_base(offset, len, 1, msg_base, msg_len);
    msg->cksum_len = len;
    if (msg->cksum == NULL) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    msg->offset_in_token = token_offset;
    return GSS_S_COMPLETE;
}

static OM_uint32
parse_alert_message(OM_uint32 *minor, struct k5input *in,
                    const uint8_t *msg_base, size_t msg_len,
                    struct alert_message *msg)
{
    const uint8_t *p;
    uint32_t atype, reason;
    size_t alerts_offset, nalerts, value_offset, value_len, i;
    struct k5input alerts_in, pulse_in;

    p = k5_input_get_bytes(in, GUID_LENGTH);
    if (p != NULL)
        memcpy(msg->scheme, p, GUID_LENGTH);
    (void)k5_input_get_uint32_le(in); /* skip over ErrorCode */
    alerts_offset = k5_input_get_uint32_le(in);
    nalerts = k5_input_get_uint32_le(in);
    p = vector_base(alerts_offset, nalerts, ALERT_LENGTH, msg_base, msg_len);
    if (p == NULL) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Look for a VERIFY_NO_KEY pulse alert in the alerts vector. */
    msg->verify_no_key = FALSE;
    k5_input_init(&alerts_in, p, nalerts * ALERT_LENGTH);
    for (i = 0; i < nalerts; i++) {
        atype = k5_input_get_uint32_le(&alerts_in);
        value_offset = k5_input_get_uint32_le(&alerts_in);
        value_len = k5_input_get_uint32_le(&alerts_in);
        p = vector_base(value_offset, value_len, 1, msg_base, msg_len);
        if (p == NULL) {
            *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
            return GSS_S_DEFECTIVE_TOKEN;
        }

        if (atype == ALERT_TYPE_PULSE && value_len >= ALERT_PULSE_LENGTH) {
            k5_input_init(&pulse_in, p, value_len);
            (void)k5_input_get_uint32_le(&pulse_in); /* skip header length */
            reason = k5_input_get_uint32_le(&pulse_in);
            if (reason == ALERT_VERIFY_NO_KEY)
                msg->verify_no_key = TRUE;
        }
    }

    return GSS_S_COMPLETE;
}
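/*
 * Every NegoEx message begins with a 40-byte common header, read below in
 * parse_message() and written by put_message_header():
 *
 *   bytes  0-7    signature   (uint64, little-endian; must be
 *                              MESSAGE_SIGNATURE)
 *   bytes  8-11   type        (uint32, little-endian; enum message_type)
 *   bytes 12-15   seqnum      (uint32, little-endian)
 *   bytes 16-19   header_len  (uint32, little-endian)
 *   bytes 20-23   msg_len     (uint32, little-endian)
 *   bytes 24-39   conv_id     (16-byte GUID)
 */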
static OM_uint32
parse_message(OM_uint32 *minor, spnego_gss_ctx_id_t ctx, struct k5input *in,
              const uint8_t *token_base, struct negoex_message *msg)
{
    OM_uint32 major;
    const uint8_t *msg_base = in->ptr, *conv_id;
    size_t token_remaining = in->len, header_len, msg_len;
    uint64_t signature;
    uint32_t type, seqnum;

    signature = k5_input_get_uint64_le(in);
    type = k5_input_get_uint32_le(in);
    seqnum = k5_input_get_uint32_le(in);
    header_len = k5_input_get_uint32_le(in);
    msg_len = k5_input_get_uint32_le(in);
    conv_id = k5_input_get_bytes(in, GUID_LENGTH);

    if (in->status || msg_len > token_remaining || header_len > msg_len) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    if (signature != MESSAGE_SIGNATURE) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIGNATURE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    if (seqnum != ctx->negoex_seqnum) {
        *minor = ERR_NEGOEX_MESSAGE_OUT_OF_SEQUENCE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    if (seqnum == 0) {
        memcpy(ctx->negoex_conv_id, conv_id, GUID_LENGTH);
    } else if (!GUID_EQ(conv_id, ctx->negoex_conv_id)) {
        *minor = ERR_NEGOEX_INVALID_CONVERSATION_ID;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Restrict the input region to the header. */
    in->len = header_len - (in->ptr - msg_base);

    msg->type = type;
    if (type == INITIATOR_NEGO || type == ACCEPTOR_NEGO) {
        major = parse_nego_message(minor, in, msg_base, msg_len, &msg->u.n);
    } else if (type == INITIATOR_META_DATA || type == ACCEPTOR_META_DATA ||
               type == CHALLENGE || type == AP_REQUEST) {
        major = parse_exchange_message(minor, in, msg_base, msg_len,
                                       &msg->u.e);
    } else if (type == VERIFY) {
        major = parse_verify_message(minor, in, msg_base, msg_len,
                                     msg_base - token_base, &msg->u.v);
    } else if (type == ALERT) {
        major = parse_alert_message(minor, in, msg_base, msg_len, &msg->u.a);
    } else {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_TYPE;
        return GSS_S_DEFECTIVE_TOKEN;
    }
    if (major != GSS_S_COMPLETE)
        return major;

    /* Reset the input buffer to the remainder of the token. */
    if (!in->status)
        k5_input_init(in, msg_base + msg_len, token_remaining - msg_len);

    ctx->negoex_seqnum++;
    trace_received_message(ctx, msg);
    return GSS_S_COMPLETE;
}

/*
 * Parse token into an array of negoex_message structures.  All pointer
 * fields within the parsed messages are aliases into token, so the result
 * can be freed with free().  An unknown protocol version, a critical
 * extension, or an unknown checksum scheme will cause a parsing failure.
 * Increment the sequence number in ctx for each message, and record and
 * check the conversation ID in ctx as appropriate.
 */
OM_uint32
negoex_parse_token(OM_uint32 *minor, spnego_gss_ctx_id_t ctx,
                   gss_const_buffer_t token,
                   struct negoex_message **messages_out, size_t *count_out)
{
    OM_uint32 major = GSS_S_COMPLETE;
    size_t count = 0;
    struct k5input in;
    struct negoex_message *messages = NULL, *newptr;

    *messages_out = NULL;
    *count_out = 0;
    assert(token != GSS_C_NO_BUFFER);
    k5_input_init(&in, token->value, token->length);

    while (in.status == 0 && in.len > 0) {
        newptr = realloc(messages, (count + 1) * sizeof(*newptr));
        if (newptr == NULL) {
            free(messages);
            *minor = ENOMEM;
            return GSS_S_FAILURE;
        }
        messages = newptr;

        major = parse_message(minor, ctx, &in, token->value, &messages[count]);
        if (major != GSS_S_COMPLETE)
            break;

        count++;
    }

    if (in.status) {
        *minor = ERR_NEGOEX_INVALID_MESSAGE_SIZE;
        major = GSS_S_DEFECTIVE_TOKEN;
    }
    if (major != GSS_S_COMPLETE) {
        free(messages);
        return major;
    }

    *messages_out = messages;
    *count_out = count;
    return GSS_S_COMPLETE;
}
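/*
 * Typical use (an illustrative sketch, not code from this module): a caller
 * processing a received NegoEx token might do
 *
 *     struct negoex_message *msgs;
 *     size_t nmsgs;
 *     struct exchange_message *metadata;
 *
 *     major = negoex_parse_token(minor, ctx, token, &msgs, &nmsgs);
 *     if (major == GSS_S_COMPLETE) {
 *         metadata = negoex_locate_exchange_message(msgs, nmsgs,
 *                                                   ACCEPTOR_META_DATA);
 *         ...
 *         free(msgs);
 *     }
 *
 * The negoex_locate_*() helpers below return aliases into the msgs array
 * (and therefore into the token buffer), so only the array itself is freed.
 */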
static struct negoex_message *
locate_message(struct negoex_message *messages, size_t nmessages,
               enum message_type type)
{
    uint32_t i;

    for (i = 0; i < nmessages; i++) {
        if (messages[i].type == type)
            return &messages[i];
    }

    return NULL;
}

struct nego_message *
negoex_locate_nego_message(struct negoex_message *messages, size_t nmessages,
                           enum message_type type)
{
    struct negoex_message *msg = locate_message(messages, nmessages, type);

    return (msg == NULL) ? NULL : &msg->u.n;
}

struct exchange_message *
negoex_locate_exchange_message(struct negoex_message *messages,
                               size_t nmessages, enum message_type type)
{
    struct negoex_message *msg = locate_message(messages, nmessages, type);

    return (msg == NULL) ? NULL : &msg->u.e;
}

struct verify_message *
negoex_locate_verify_message(struct negoex_message *messages,
                             size_t nmessages)
{
    struct negoex_message *msg = locate_message(messages, nmessages, VERIFY);

    return (msg == NULL) ? NULL : &msg->u.v;
}

struct alert_message *
negoex_locate_alert_message(struct negoex_message *messages, size_t nmessages)
{
    struct negoex_message *msg = locate_message(messages, nmessages, ALERT);

    return (msg == NULL) ? NULL : &msg->u.a;
}

/*
 * Add the encoding of a MESSAGE_HEADER structure to the context transcript
 * buffer, given the number of bytes of the payload following the full
 * header.  Increment the sequence number in ctx.  Set *payload_start_out to
 * the position of the payload within the message.
 */
static void
put_message_header(spnego_gss_ctx_id_t ctx, enum message_type type,
                   uint32_t payload_len, uint32_t *payload_start_out)
{
    size_t header_len;

    if (type == INITIATOR_NEGO || type == ACCEPTOR_NEGO)
        header_len = NEGO_MESSAGE_HEADER_LENGTH;
    else if (type == INITIATOR_META_DATA || type == ACCEPTOR_META_DATA ||
             type == CHALLENGE || type == AP_REQUEST)
        header_len = EXCHANGE_MESSAGE_HEADER_LENGTH;
    else if (type == VERIFY)
        header_len = VERIFY_MESSAGE_HEADER_LENGTH;
    else if (type == ALERT)
        header_len = ALERT_MESSAGE_HEADER_LENGTH;
    else
        abort();

    k5_buf_add_uint64_le(&ctx->negoex_transcript, MESSAGE_SIGNATURE);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, type);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, ctx->negoex_seqnum++);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, header_len);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, header_len + payload_len);
    k5_buf_add_len(&ctx->negoex_transcript, ctx->negoex_conv_id, GUID_LENGTH);

    *payload_start_out = header_len;
}
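/*
 * Each negoex_add_*() function below writes the remainder of its
 * message-specific fixed header immediately after the 40-byte common header,
 * followed by the payload.  For example, for INITIATOR_NEGO/ACCEPTOR_NEGO
 * the fixed part is 40 (common header) + 32 (Random) + 8 (ProtocolVersion) +
 * 6 (AuthSchemes vector descriptor) + 6 (Extensions vector descriptor) + 4
 * (padding) = 96 bytes, which NEGO_MESSAGE_HEADER_LENGTH (defined alongside
 * the other NegoEx constants) is expected to equal so that the payload
 * begins at *payload_start_out.
 */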
void
negoex_add_nego_message(spnego_gss_ctx_id_t ctx, enum message_type type,
                        uint8_t random[32])
{
    struct negoex_auth_mech *mech;
    uint32_t payload_start, seqnum = ctx->negoex_seqnum;
    uint16_t nschemes;
    struct k5buf buf;

    nschemes = 0;
    K5_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
        nschemes++;

    put_message_header(ctx, type, nschemes * GUID_LENGTH, &payload_start);
    k5_buf_add_len(&ctx->negoex_transcript, random, 32);
    /* ProtocolVersion */
    k5_buf_add_uint64_le(&ctx->negoex_transcript, 0);
    /* AuthSchemes vector */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, payload_start);
    k5_buf_add_uint16_le(&ctx->negoex_transcript, nschemes);
    /* Extensions vector */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, payload_start);
    k5_buf_add_uint16_le(&ctx->negoex_transcript, 0);
    /* Four bytes of padding to reach a multiple of 8 bytes. */
    k5_buf_add_len(&ctx->negoex_transcript, "\0\0\0\0", 4);

    /* Payload (auth schemes); also build guid string for tracing. */
    k5_buf_init_dynamic(&buf);
    K5_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
        k5_buf_add_len(&ctx->negoex_transcript, mech->scheme, GUID_LENGTH);
        add_guid(&buf, mech->scheme);
        k5_buf_add(&buf, " ");
    }

    if (buf.len > 0) {
        k5_buf_truncate(&buf, buf.len - 1);
        TRACE_NEGOEX_OUTGOING(ctx->kctx, seqnum, typestr(type),
                              k5_buf_cstring(&buf));
        k5_buf_free(&buf);
    }
}

void
negoex_add_exchange_message(spnego_gss_ctx_id_t ctx, enum message_type type,
                            const auth_scheme scheme, gss_buffer_t token)
{
    uint32_t payload_start;

    put_message_header(ctx, type, token->length, &payload_start);
    k5_buf_add_len(&ctx->negoex_transcript, scheme, GUID_LENGTH);
    /* Exchange byte vector */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, payload_start);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, token->length);
    /* Payload (token) */
    k5_buf_add_len(&ctx->negoex_transcript, token->value, token->length);

    trace_outgoing_message(ctx, type, scheme);
}

void
negoex_add_verify_message(spnego_gss_ctx_id_t ctx, const auth_scheme scheme,
                          uint32_t cksum_type, const uint8_t *cksum,
                          uint32_t cksum_len)
{
    uint32_t payload_start;

    put_message_header(ctx, VERIFY, cksum_len, &payload_start);
    k5_buf_add_len(&ctx->negoex_transcript, scheme, GUID_LENGTH);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, CHECKSUM_HEADER_LENGTH);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, CHECKSUM_SCHEME_RFC3961);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, cksum_type);
    /* ChecksumValue vector */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, payload_start);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, cksum_len);
    /* Four bytes of padding to reach a multiple of 8 bytes. */
    k5_buf_add_len(&ctx->negoex_transcript, "\0\0\0\0", 4);
    /* Payload (checksum contents) */
    k5_buf_add_len(&ctx->negoex_transcript, cksum, cksum_len);

    trace_outgoing_message(ctx, VERIFY, scheme);
}
/* Add an ALERT_MESSAGE containing a single ALERT_TYPE_PULSE alert with the
 * reason ALERT_VERIFY_NO_KEY. */
void
negoex_add_verify_no_key_alert(spnego_gss_ctx_id_t ctx,
                               const auth_scheme scheme)
{
    uint32_t payload_start;

    put_message_header(ctx, ALERT, ALERT_LENGTH + ALERT_PULSE_LENGTH,
                       &payload_start);
    k5_buf_add_len(&ctx->negoex_transcript, scheme, GUID_LENGTH);
    /* ErrorCode */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, 0);
    /* Alerts vector */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, payload_start);
    k5_buf_add_uint16_le(&ctx->negoex_transcript, 1);
    /* Six bytes of padding to reach a multiple of 8 bytes. */
    k5_buf_add_len(&ctx->negoex_transcript, "\0\0\0\0\0\0", 6);
    /* Payload part 1: a single ALERT element */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, ALERT_TYPE_PULSE);
    k5_buf_add_uint32_le(&ctx->negoex_transcript,
                         payload_start + ALERT_LENGTH);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, ALERT_PULSE_LENGTH);
    /* Payload part 2: ALERT_PULSE */
    k5_buf_add_uint32_le(&ctx->negoex_transcript, ALERT_PULSE_LENGTH);
    k5_buf_add_uint32_le(&ctx->negoex_transcript, ALERT_VERIFY_NO_KEY);

    trace_outgoing_message(ctx, ALERT, scheme);
}

static void
release_auth_mech(struct negoex_auth_mech *mech)
{
    OM_uint32 tmpmin;

    if (mech == NULL)
        return;

    gss_delete_sec_context(&tmpmin, &mech->mech_context, NULL);
    generic_gss_release_oid(&tmpmin, &mech->oid);
    gss_release_buffer(&tmpmin, &mech->metadata);
    krb5_free_keyblock_contents(NULL, &mech->key);
    krb5_free_keyblock_contents(NULL, &mech->verify_key);

    free(mech);
}

void
negoex_delete_auth_mech(spnego_gss_ctx_id_t ctx,
                        struct negoex_auth_mech *mech)
{
    K5_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
    release_auth_mech(mech);
}

/* Remove all auth mech entries except for mech from ctx->negoex_mechs. */
void
negoex_select_auth_mech(spnego_gss_ctx_id_t ctx,
                        struct negoex_auth_mech *mech)
{
    assert(mech != NULL);
    K5_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
    release_all_mechs(ctx);
    K5_TAILQ_INSERT_HEAD(&ctx->negoex_mechs, mech, links);
}

OM_uint32
negoex_add_auth_mech(OM_uint32 *minor, spnego_gss_ctx_id_t ctx,
                     gss_const_OID oid, auth_scheme scheme)
{
    OM_uint32 major;
    struct negoex_auth_mech *mech;

    mech = calloc(1, sizeof(*mech));
    if (mech == NULL) {
        *minor = ENOMEM;
        return GSS_S_FAILURE;
    }

    major = generic_gss_copy_oid(minor, (gss_OID)oid, &mech->oid);
    if (major != GSS_S_COMPLETE) {
        free(mech);
        return major;
    }

    memcpy(mech->scheme, scheme, GUID_LENGTH);

    K5_TAILQ_INSERT_TAIL(&ctx->negoex_mechs, mech, links);

    *minor = 0;
    return GSS_S_COMPLETE;
}

struct negoex_auth_mech *
negoex_locate_auth_scheme(spnego_gss_ctx_id_t ctx, const auth_scheme scheme)
{
    struct negoex_auth_mech *mech;

    K5_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
        if (GUID_EQ(mech->scheme, scheme))
            return mech;
    }

    return NULL;
}

/* Prune ctx->negoex_mechs to the schemes present in schemes, and reorder
 * them to match its order. */
void
negoex_common_auth_schemes(spnego_gss_ctx_id_t ctx,
                           const uint8_t *schemes, uint16_t nschemes)
{
    struct negoex_mech_list list;
    struct negoex_auth_mech *mech;
    uint16_t i;

    /* Construct a new list in the order of schemes. */
    K5_TAILQ_INIT(&list);
    for (i = 0; i < nschemes; i++) {
        mech = negoex_locate_auth_scheme(ctx, schemes + i * GUID_LENGTH);
        if (mech == NULL)
            continue;
        K5_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
        K5_TAILQ_INSERT_TAIL(&list, mech, links);
    }

    /* Release any leftover entries and replace the context list. */
    release_all_mechs(ctx);
    K5_TAILQ_CONCAT(&ctx->negoex_mechs, &list, links);
}
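/*
 * Illustrative example of the two pruning helpers: if ctx->negoex_mechs
 * currently holds schemes {A, B, C} and the peer advertises {C, A}, then
 * negoex_common_auth_schemes() above leaves {C, A} (the peer's order), while
 * negoex_restrict_auth_schemes() below leaves {A, C} (the local order).  The
 * letters stand in for auth scheme GUIDs.
 */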
/* Prune ctx->negoex_mechs to the schemes present in schemes, but do not
 * change their order. */
void
negoex_restrict_auth_schemes(spnego_gss_ctx_id_t ctx,
                             const uint8_t *schemes, uint16_t nschemes)
{
    struct negoex_auth_mech *mech, *next;
    uint16_t i;
    int found;

    K5_TAILQ_FOREACH_SAFE(mech, &ctx->negoex_mechs, links, next) {
        found = FALSE;
        for (i = 0; i < nschemes && !found; i++) {
            if (GUID_EQ(mech->scheme, schemes + i * GUID_LENGTH))
                found = TRUE;
        }

        if (!found)
            negoex_delete_auth_mech(ctx, mech);
    }
}