/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Copyright 1996 by Sun Microsystems, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appears in all copies and
 * that both that copyright notice and this permission notice appear in
 * supporting documentation, and that the name of Sun Microsystems not be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission. Sun Microsystems makes no
 * representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied warranty.
 *
 * SUN MICROSYSTEMS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL SUN MICROSYSTEMS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * glue routine for gss_wrap
 */

#include "mglueP.h"

30 static OM_uint32
val_wrap_args(OM_uint32 * minor_status,gss_ctx_id_t context_handle,int conf_req_flag,gss_qop_t qop_req,gss_buffer_t input_message_buffer,int * conf_state,gss_buffer_t output_message_buffer)31 val_wrap_args(OM_uint32 *minor_status,
32 gss_ctx_id_t context_handle,
33 int conf_req_flag,
34 gss_qop_t qop_req,
35 gss_buffer_t input_message_buffer,
36 int *conf_state,
37 gss_buffer_t output_message_buffer)
38 {
39 /* Initialize outputs. */
40
41 if (minor_status != NULL)
42 *minor_status = 0;
43
44 if (output_message_buffer != GSS_C_NO_BUFFER) {
45 output_message_buffer->length = 0;
46 output_message_buffer->value = NULL;
47 }
48
49 /* Validate arguments. */
50
51 if (minor_status == NULL)
52 return (GSS_S_CALL_INACCESSIBLE_WRITE);
53
54 if (context_handle == GSS_C_NO_CONTEXT)
55 return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);
56
57 if (input_message_buffer == GSS_C_NO_BUFFER)
58 return (GSS_S_CALL_INACCESSIBLE_READ);
59
60 if (output_message_buffer == GSS_C_NO_BUFFER)
61 return (GSS_S_CALL_INACCESSIBLE_WRITE);
62
63 return (GSS_S_COMPLETE);
64 }
65
66 OM_uint32 KRB5_CALLCONV
gss_wrap(OM_uint32 * minor_status,gss_ctx_id_t context_handle,int conf_req_flag,gss_qop_t qop_req,gss_buffer_t input_message_buffer,int * conf_state,gss_buffer_t output_message_buffer)67 gss_wrap( OM_uint32 *minor_status,
68 gss_ctx_id_t context_handle,
69 int conf_req_flag,
70 gss_qop_t qop_req,
71 gss_buffer_t input_message_buffer,
72 int *conf_state,
73 gss_buffer_t output_message_buffer)
74 {
75
76 /* EXPORT DELETE START */
77
78 OM_uint32 status;
79 gss_union_ctx_id_t ctx;
80 gss_mechanism mech;
81
82 status = val_wrap_args(minor_status, context_handle,
83 conf_req_flag, qop_req,
84 input_message_buffer, conf_state,
85 output_message_buffer);
86 if (status != GSS_S_COMPLETE)
87 return (status);
88
89 /*
90 * select the approprate underlying mechanism routine and
91 * call it.
92 */
93
94 ctx = (gss_union_ctx_id_t) context_handle;
95 if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
96 return (GSS_S_NO_CONTEXT);
97 mech = gssint_get_mechanism (ctx->mech_type);
98
99 if (mech) {
100 if (mech->gss_wrap) {
101 status = mech->gss_wrap(minor_status,
102 ctx->internal_ctx_id,
103 conf_req_flag,
104 qop_req,
105 input_message_buffer,
106 conf_state,
107 output_message_buffer);
108 if (status != GSS_S_COMPLETE)
109 map_error(minor_status, mech);
110 } else if (mech->gss_wrap_aead ||
111 (mech->gss_wrap_iov && mech->gss_wrap_iov_length)) {
112 status = gssint_wrap_aead(mech,
113 minor_status,
114 ctx,
115 conf_req_flag,
116 (gss_qop_t)qop_req,
117 GSS_C_NO_BUFFER,
118 input_message_buffer,
119 conf_state,
120 output_message_buffer);
121 } else
122 status = GSS_S_UNAVAILABLE;
123
124 return(status);
125 }
126 /* EXPORT DELETE END */
127
128 return (GSS_S_BAD_MECH);
129 }
130
131 OM_uint32 KRB5_CALLCONV
gss_seal(OM_uint32 * minor_status,gss_ctx_id_t context_handle,int conf_req_flag,int qop_req,gss_buffer_t input_message_buffer,int * conf_state,gss_buffer_t output_message_buffer)132 gss_seal(OM_uint32 *minor_status,
133 gss_ctx_id_t context_handle,
134 int conf_req_flag,
135 int qop_req,
136 gss_buffer_t input_message_buffer,
137 int *conf_state,
138 gss_buffer_t output_message_buffer)
139 {
140
141 return gss_wrap(minor_status, context_handle,
142 conf_req_flag, (gss_qop_t) qop_req,
143 input_message_buffer, conf_state,
144 output_message_buffer);
145 }
146
147 /*
148 * It is only possible to implement gss_wrap_size_limit() on top
149 * of gss_wrap_iov_length() for mechanisms that do not use any
150 * padding and have fixed length headers/trailers.
151 */
152 static OM_uint32
gssint_wrap_size_limit_iov_shim(gss_mechanism mech,OM_uint32 * minor_status,gss_ctx_id_t context_handle,int conf_req_flag,gss_qop_t qop_req,OM_uint32 req_output_size,OM_uint32 * max_input_size)153 gssint_wrap_size_limit_iov_shim(gss_mechanism mech,
154 OM_uint32 *minor_status,
155 gss_ctx_id_t context_handle,
156 int conf_req_flag,
157 gss_qop_t qop_req,
158 OM_uint32 req_output_size,
159 OM_uint32 *max_input_size)
160 {
161 gss_iov_buffer_desc iov[4];
162 OM_uint32 status;
163 OM_uint32 ohlen;
164
165 iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
166 iov[0].buffer.value = NULL;
167 iov[0].buffer.length = 0;
168
169 iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
170 iov[1].buffer.length = req_output_size;
171 iov[1].buffer.value = NULL;
172
173 iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
174 iov[2].buffer.value = NULL;
175 iov[2].buffer.length = 0;
176
177 iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER;
178 iov[3].buffer.value = NULL;
179 iov[3].buffer.length = 0;
180
181 assert(mech->gss_wrap_iov_length);
182
183 status = mech->gss_wrap_iov_length(minor_status, context_handle,
184 conf_req_flag, qop_req,
185 NULL, iov,
186 sizeof(iov)/sizeof(iov[0]));
187 if (status != GSS_S_COMPLETE) {
188 map_error(minor_status, mech);
189 return status;
190 }
191
192 ohlen = iov[0].buffer.length + iov[3].buffer.length;
193
194 if (iov[2].buffer.length == 0 && ohlen < req_output_size)
195 *max_input_size = req_output_size - ohlen;
196 else
197 *max_input_size = 0;
198
199 return GSS_S_COMPLETE;
200 }
201
202 /*
203 * New for V2
204 */
205 OM_uint32 KRB5_CALLCONV
gss_wrap_size_limit(OM_uint32 * minor_status,gss_ctx_id_t context_handle,int conf_req_flag,gss_qop_t qop_req,OM_uint32 req_output_size,OM_uint32 * max_input_size)206 gss_wrap_size_limit(OM_uint32 *minor_status,
207 gss_ctx_id_t context_handle,
208 int conf_req_flag,
209 gss_qop_t qop_req, OM_uint32 req_output_size, OM_uint32 *max_input_size)
210 {
211 gss_union_ctx_id_t ctx;
212 gss_mechanism mech;
213 OM_uint32 major_status;
214
215 if (minor_status == NULL)
216 return (GSS_S_CALL_INACCESSIBLE_WRITE);
217 *minor_status = 0;
218
219 if (context_handle == GSS_C_NO_CONTEXT)
220 return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);
221
222 if (max_input_size == NULL)
223 return (GSS_S_CALL_INACCESSIBLE_WRITE);
224
225 /*
226 * select the approprate underlying mechanism routine and
227 * call it.
228 */
229
230 ctx = (gss_union_ctx_id_t) context_handle;
231 if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
232 return (GSS_S_NO_CONTEXT);
233 mech = gssint_get_mechanism (ctx->mech_type);
234
235 if (!mech)
236 return (GSS_S_BAD_MECH);
237
238 if (mech->gss_wrap_size_limit)
239 major_status = mech->gss_wrap_size_limit(minor_status,
240 ctx->internal_ctx_id,
241 conf_req_flag, qop_req,
242 req_output_size, max_input_size);
243 else if (mech->gss_wrap_iov_length)
244 major_status = gssint_wrap_size_limit_iov_shim(mech, minor_status,
245 ctx->internal_ctx_id,
246 conf_req_flag, qop_req,
247 req_output_size, max_input_size);
248 else
249 major_status = GSS_S_UNAVAILABLE;
250 if (major_status != GSS_S_COMPLETE)
251 map_error(minor_status, mech);
252 return major_status;
253 }
254