xref: /freebsd/crypto/krb5/src/lib/gssapi/mechglue/g_wrap_aead.c (revision 7f2fe78b9dd5f51c821d771b63d2e096f6fd49e9)
/* #pragma ident	"@(#)g_seal.c	1.19	98/04/21 SMI" */

/*
 * Copyright 1996 by Sun Microsystems, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appears in all copies and
 * that both that copyright notice and this permission notice appear in
 * supporting documentation, and that the name of Sun Microsystems not be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission. Sun Microsystems makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 *
 * SUN MICROSYSTEMS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL SUN MICROSYSTEMS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 *  glue routine for gss_wrap_aead
 */
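/*
 * The public gss_wrap_aead() below validates its arguments, resolves the
 * union context to a mechanism, and then dispatches through
 * gssint_wrap_aead(): mechanisms that provide a native gss_wrap_aead entry
 * point are called directly, while mechanisms that only implement the IOV
 * interfaces are served through the gssint_wrap_aead_iov_shim() helper.
 */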

#include "mglueP.h"

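/*
 * Sanity-check the caller-supplied arguments before any mechanism code is
 * invoked; output parameters are initialized here as well.
 */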
static OM_uint32
val_wrap_aead_args(
    OM_uint32 *minor_status,
    gss_ctx_id_t context_handle,
    int conf_req_flag,
    gss_qop_t qop_req,
    gss_buffer_t input_assoc_buffer,
    gss_buffer_t input_payload_buffer,
    int *conf_state,
    gss_buffer_t output_message_buffer)
{

    /* Initialize outputs. */

    if (minor_status != NULL)
	*minor_status = 0;

    /* Validate arguments. */

    if (minor_status == NULL)
	return (GSS_S_CALL_INACCESSIBLE_WRITE);

    if (context_handle == GSS_C_NO_CONTEXT)
	return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);

    if (input_payload_buffer == GSS_C_NO_BUFFER)
	return (GSS_S_CALL_INACCESSIBLE_READ);

    if (output_message_buffer == GSS_C_NO_BUFFER)
	return (GSS_S_CALL_INACCESSIBLE_WRITE);

    return (GSS_S_COMPLETE);
}

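/*
 * Emulate gss_wrap_aead() for mechanisms that lack a native entry point but
 * do provide gss_wrap_iov()/gss_wrap_iov_length(): the associated data is
 * presented as a SIGN_ONLY buffer, the payload as a DATA buffer, and the
 * resulting HEADER | DATA | PADDING | TRAILER buffers are packed into one
 * contiguous output token.
 */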
static OM_uint32
gssint_wrap_aead_iov_shim(gss_mechanism mech,
			  OM_uint32 *minor_status,
			  gss_ctx_id_t context_handle,
			  int conf_req_flag,
			  gss_qop_t qop_req,
			  gss_buffer_t input_assoc_buffer,
			  gss_buffer_t input_payload_buffer,
			  int *conf_state,
			  gss_buffer_t output_message_buffer)
{
    gss_iov_buffer_desc	iov[5];
    OM_uint32		status;
    size_t		offset;
    int			i = 0, iov_count;

    /* HEADER | SIGN_ONLY_DATA | DATA | PADDING | TRAILER */

    iov[i].type = GSS_IOV_BUFFER_TYPE_HEADER;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;

    if (input_assoc_buffer != GSS_C_NO_BUFFER) {
	iov[i].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
	iov[i].buffer = *input_assoc_buffer;
	i++;
    }

    iov[i].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[i].buffer = *input_payload_buffer;
    i++;

    iov[i].type = GSS_IOV_BUFFER_TYPE_PADDING;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;

    iov[i].type = GSS_IOV_BUFFER_TYPE_TRAILER;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;

    iov_count = i;

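    /*
     * Ask the mechanism how large each generated buffer (header, padding,
     * trailer) will be, so that the output token can be allocated as a
     * single contiguous block.
     */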
    assert(mech->gss_wrap_iov_length);

    status = mech->gss_wrap_iov_length(minor_status, context_handle,
				       conf_req_flag, qop_req,
				       NULL, iov, iov_count);
    if (status != GSS_S_COMPLETE) {
	map_error(minor_status, mech);
	return status;
    }

    /* Format output token (does not include associated data) */
    for (i = 0, output_message_buffer->length = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
	    continue;

	output_message_buffer->length += iov[i].buffer.length;
    }

    output_message_buffer->value = gssalloc_malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

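    /*
     * Carve the allocated token into the individual IOV buffers and copy
     * the caller's payload into its slot; the associated data stays in the
     * caller's buffer and is not part of the token.
     */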
    i = 0, offset = 0;

    /* HEADER */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    /* SIGN_ONLY_DATA */
    if (input_assoc_buffer != GSS_C_NO_BUFFER)
	i++;

    /* DATA */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;

    memcpy(iov[i].buffer.value, input_payload_buffer->value, iov[i].buffer.length);
    i++;

    /* PADDING */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    /* TRAILER */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    assert(offset == output_message_buffer->length);

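    /*
     * With every buffer now pointing into the output token, perform the
     * actual wrap in place.
     */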
    assert(mech->gss_wrap_iov);

    status = mech->gss_wrap_iov(minor_status, context_handle,
				conf_req_flag, qop_req,
				conf_state, iov, iov_count);
    if (status != GSS_S_COMPLETE) {
	OM_uint32 minor;

	map_error(minor_status, mech);
	gss_release_buffer(&minor, output_message_buffer);
    }

    return status;
}

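/*
 * Internal dispatcher: prefer the mechanism's native gss_wrap_aead entry
 * point and fall back to the IOV shim above; GSS_S_UNAVAILABLE is returned
 * if the mechanism supports neither interface.
 */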
OM_uint32
gssint_wrap_aead (gss_mechanism mech,
		  OM_uint32 *minor_status,
		  gss_union_ctx_id_t ctx,
		  int conf_req_flag,
		  gss_qop_t qop_req,
		  gss_buffer_t input_assoc_buffer,
		  gss_buffer_t input_payload_buffer,
		  int *conf_state,
		  gss_buffer_t output_message_buffer)
{
 /* EXPORT DELETE START */
    OM_uint32		status;

    assert(ctx != NULL);
    assert(mech != NULL);

    if (mech->gss_wrap_aead) {
	status = mech->gss_wrap_aead(minor_status,
				     ctx->internal_ctx_id,
				     conf_req_flag,
				     qop_req,
				     input_assoc_buffer,
				     input_payload_buffer,
				     conf_state,
				     output_message_buffer);
	if (status != GSS_S_COMPLETE)
	    map_error(minor_status, mech);
    } else if (mech->gss_wrap_iov && mech->gss_wrap_iov_length) {
	status = gssint_wrap_aead_iov_shim(mech,
					   minor_status,
					   ctx->internal_ctx_id,
					   conf_req_flag,
					   qop_req,
					   input_assoc_buffer,
					   input_payload_buffer,
					   conf_state,
					   output_message_buffer);
    } else
	status = GSS_S_UNAVAILABLE;

 /* EXPORT DELETE END */

    return status;
}

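/*
 * Public entry point.  A minimal caller sketch (illustrative only; "ctx" is
 * assumed to be an established security context and "assoc"/"payload" are
 * caller-provided gss_buffer_desc values):
 *
 *	OM_uint32 major, minor;
 *	gss_buffer_desc token = GSS_C_EMPTY_BUFFER;
 *	int conf;
 *
 *	major = gss_wrap_aead(&minor, ctx, 1, GSS_C_QOP_DEFAULT,
 *			      &assoc, &payload, &conf, &token);
 *	if (major == GSS_S_COMPLETE) {
 *		... transmit token (assoc travels separately) ...
 *		(void) gss_release_buffer(&minor, &token);
 *	}
 */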
OM_uint32 KRB5_CALLCONV
gss_wrap_aead (minor_status,
               context_handle,
               conf_req_flag,
               qop_req,
	       input_assoc_buffer,
	       input_payload_buffer,
               conf_state,
               output_message_buffer)
OM_uint32 *		minor_status;
gss_ctx_id_t		context_handle;
int			conf_req_flag;
gss_qop_t		qop_req;
gss_buffer_t		input_assoc_buffer;
gss_buffer_t		input_payload_buffer;
int *			conf_state;
gss_buffer_t		output_message_buffer;
{
    OM_uint32		status;
    gss_mechanism	mech;
    gss_union_ctx_id_t	ctx;

    status = val_wrap_aead_args(minor_status, context_handle,
				conf_req_flag, qop_req,
				input_assoc_buffer, input_payload_buffer,
				conf_state, output_message_buffer);
    if (status != GSS_S_COMPLETE)
	return (status);

    /*
     * Select the appropriate underlying mechanism routine and
     * call it.
     */
    ctx = (gss_union_ctx_id_t)context_handle;
    if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
	return (GSS_S_NO_CONTEXT);
    mech = gssint_get_mechanism (ctx->mech_type);
    if (!mech)
	return (GSS_S_BAD_MECH);

    return gssint_wrap_aead(mech, minor_status, ctx,
			    conf_req_flag, qop_req,
			    input_assoc_buffer, input_payload_buffer,
			    conf_state, output_message_buffer);
}