/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/ccache/cc_kcm.c - KCM cache type (client side) */
/*
 * Copyright (C) 2014 by the Massachusetts Institute of Technology.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This cache type contacts a daemon for each cache operation, using Heimdal's
 * KCM protocol. On macOS, the preferred transport is Mach RPC; on other
 * Unix-like platforms or if the daemon is not available via RPC, Unix domain
 * sockets are used instead.
 */
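
/*
 * Caches of this type are reached through the generic ccache API. A minimal
 * sketch (the residual "12345" is hypothetical; real residuals are chosen by
 * the daemon):
 *
 *     krb5_ccache cc;
 *     krb5_principal princ;
 *     krb5_error_code ret;
 *
 *     ret = krb5_cc_resolve(context, "KCM:12345", &cc);
 *     if (!ret)
 *         ret = krb5_cc_get_principal(context, cc, &princ);
 *
 * Resolving "KCM:" with an empty residual selects the collection's primary
 * cache (see kcm_resolve() below).
 */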

#ifndef _WIN32
#include "k5-int.h"
#include "k5-input.h"
#include "cc-int.h"
#include "kcm.h"
#include "../os/os-proto.h"
#include <sys/socket.h>
#include <sys/un.h>
#ifdef __APPLE__
#include <mach/mach.h>
#include <servers/bootstrap.h>
#include "kcmrpc.h"
#endif

#define MAX_REPLY_SIZE (10 * 1024 * 1024)
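
/* The reply length arrives on the wire before the reply body, so this cap
 * keeps a malformed or hostile length field from provoking a huge allocation
 * in kcmio_unix_socket_read(). */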

const krb5_cc_ops krb5_kcm_ops;

struct uuid_list {
    unsigned char *uuidbytes;   /* all of the uuids concatenated together */
    size_t count;
    size_t pos;
};

struct cred_list {
    krb5_creds *creds;
    size_t count;
    size_t pos;
};

struct kcm_cursor {
    struct uuid_list *uuids;
    struct cred_list *creds;
};

struct kcmio {
    SOCKET fd;
#ifdef __APPLE__
    mach_port_t mport;
#endif
};

/* This structure bundles together a KCM request and reply, to minimize how
 * much we have to declare and clean up in each method. */
struct kcmreq {
    struct k5buf reqbuf;
    struct k5input reply;
    void *reply_mem;
};
#define EMPTY_KCMREQ { EMPTY_K5BUF }
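
/* EMPTY_KCMREQ initializes reqbuf and zeroes the remaining fields, so a
 * request declared with it can safely reach kcmreq_free() on an error path
 * before kcmreq_init() has run. */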

struct kcm_cache_data {
    char *residual;             /* immutable; may be accessed without lock */
    k5_cc_mutex lock;           /* protects io */
    struct kcmio *io;
};

struct kcm_ptcursor {
    char *residual;             /* primary or singleton subsidiary */
    struct uuid_list *uuids;    /* NULL for singleton subsidiary */
    struct kcmio *io;
    krb5_boolean first;
};

/* Map EINVAL or KRB5_CC_FORMAT to KRB5_KCM_MALFORMED_REPLY; pass through all
 * other codes. */
static inline krb5_error_code
map_invalid(krb5_error_code code)
{
    return (code == EINVAL || code == KRB5_CC_FORMAT) ?
        KRB5_KCM_MALFORMED_REPLY : code;
}

/*
 * Map an MIT krb5 KRB5_TC flag word to the equivalent Heimdal flag word. Note
 * that there is no MIT krb5 equivalent for Heimdal's KRB5_TC_DONT_MATCH_REALM
 * (which is like KRB5_TC_MATCH_SRV_NAMEONLY but also applies to the client
 * principal) and no Heimdal equivalent for MIT krb5's KRB5_TC_SUPPORTED_KTYPES
 * (which matches against enctypes from the krb5_context rather than the
 * matching cred).
 */
static inline krb5_flags
map_tcflags(krb5_flags mitflags)
{
    krb5_flags heimflags = 0;

    if (mitflags & KRB5_TC_MATCH_TIMES)
        heimflags |= KCM_TC_MATCH_TIMES;
    if (mitflags & KRB5_TC_MATCH_IS_SKEY)
        heimflags |= KCM_TC_MATCH_IS_SKEY;
    if (mitflags & KRB5_TC_MATCH_FLAGS)
        heimflags |= KCM_TC_MATCH_FLAGS;
    if (mitflags & KRB5_TC_MATCH_TIMES_EXACT)
        heimflags |= KCM_TC_MATCH_TIMES_EXACT;
    if (mitflags & KRB5_TC_MATCH_FLAGS_EXACT)
        heimflags |= KCM_TC_MATCH_FLAGS_EXACT;
    if (mitflags & KRB5_TC_MATCH_AUTHDATA)
        heimflags |= KCM_TC_MATCH_AUTHDATA;
    if (mitflags & KRB5_TC_MATCH_SRV_NAMEONLY)
        heimflags |= KCM_TC_MATCH_SRV_NAMEONLY;
    if (mitflags & KRB5_TC_MATCH_2ND_TKT)
        heimflags |= KCM_TC_MATCH_2ND_TKT;
    if (mitflags & KRB5_TC_MATCH_KTYPE)
        heimflags |= KCM_TC_MATCH_KEYTYPE;
    return heimflags;
}

/*
 * Return true if code could indicate an unsupported operation. Heimdal's KCM
 * returns KRB5_FCC_INTERNAL. sssd's KCM daemon (as of sssd 2.4) returns
 * KRB5_CC_NOSUPP if it recognizes the operation but does not implement it,
 * and KRB5_CC_IO if it doesn't recognize the operation (which is unfortunate
 * since that code could also indicate a communication failure).
 */
static krb5_boolean
unsupported_op_error(krb5_error_code code)
{
    return code == KRB5_FCC_INTERNAL || code == KRB5_CC_IO ||
        code == KRB5_CC_NOSUPP;
}

/* Begin a request for the given opcode. If cache is non-null, supply the
 * cache name as a request parameter. */
static void
kcmreq_init(struct kcmreq *req, kcm_opcode opcode, krb5_ccache cache)
{
    unsigned char bytes[4];
    const char *name;

    memset(req, 0, sizeof(*req));

    bytes[0] = KCM_PROTOCOL_VERSION_MAJOR;
    bytes[1] = KCM_PROTOCOL_VERSION_MINOR;
    store_16_be(opcode, bytes + 2);

    k5_buf_init_dynamic(&req->reqbuf);
    k5_buf_add_len(&req->reqbuf, bytes, 4);
    if (cache != NULL) {
        name = ((struct kcm_cache_data *)cache->data)->residual;
        k5_buf_add_len(&req->reqbuf, name, strlen(name) + 1);
    }
}
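
/*
 * After kcmreq_init(), reqbuf holds the request header, e.g. for
 * KCM_OP_GET_PRINCIPAL against cache "foo":
 *
 *     [major version, 1 byte][minor version, 1 byte][opcode, 2 bytes BE]
 *     "foo\0"                                    (only if cache != NULL)
 *
 * Operation-specific arguments are appended to reqbuf by the callers below.
 */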

#ifdef __APPLE__

/* The maximum length of an in-band request or reply as defined by the RPC
 * protocol. */
#define MAX_INBAND_SIZE 2048
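
/* Messages up to MAX_INBAND_SIZE travel inline in the Mach message; larger
 * ones are passed out-of-band (out-of-band reply memory is owned by the
 * caller and is released with vm_deallocate() below). */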

/* Connect or reconnect to the KCM daemon via Mach RPC, if possible. */
static krb5_error_code
kcmio_mach_connect(krb5_context context, struct kcmio *io)
{
    krb5_error_code ret;
    kern_return_t st;
    mach_port_t mport;
    char *service;

    ret = profile_get_string(context->profile, KRB5_CONF_LIBDEFAULTS,
                             KRB5_CONF_KCM_MACH_SERVICE, NULL,
                             DEFAULT_KCM_MACH_SERVICE, &service);
    if (ret)
        return ret;
    if (strcmp(service, "-") == 0) {
        profile_release_string(service);
        return KRB5_KCM_NO_SERVER;
    }

    st = bootstrap_look_up(bootstrap_port, service, &mport);
    profile_release_string(service);
    if (st)
        return KRB5_KCM_NO_SERVER;
    if (io->mport != MACH_PORT_NULL)
        mach_port_deallocate(mach_task_self(), io->mport);
    io->mport = mport;
    return 0;
}

/* Invoke the Mach RPC to get a reply from the KCM daemon. */
static krb5_error_code
kcmio_mach_call(krb5_context context, struct kcmio *io, void *data,
                size_t len, void **reply_out, size_t *len_out)
{
    krb5_error_code ret;
    size_t inband_req_len = 0, outband_req_len = 0, reply_len;
    char *inband_req = NULL, *outband_req = NULL, *outband_reply, *copy;
    char inband_reply[MAX_INBAND_SIZE];
    mach_msg_type_number_t inband_reply_len, outband_reply_len;
    const void *reply;
    kern_return_t st;
    int code;

    *reply_out = NULL;
    *len_out = 0;

    /* Use the in-band or out-of-band request buffer depending on len. */
    if (len <= MAX_INBAND_SIZE) {
        inband_req = data;
        inband_req_len = len;
    } else {
        outband_req = data;
        outband_req_len = len;
    }

    st = k5_kcmrpc_call(io->mport, inband_req, inband_req_len, outband_req,
                        outband_req_len, &code, inband_reply,
                        &inband_reply_len, &outband_reply, &outband_reply_len);
    if (st == MACH_SEND_INVALID_DEST) {
        /* Get a new port and try again. */
        st = kcmio_mach_connect(context, io);
        if (st)
            return KRB5_KCM_RPC_ERROR;
        st = k5_kcmrpc_call(io->mport, inband_req, inband_req_len, outband_req,
                            outband_req_len, &code, inband_reply,
                            &inband_reply_len, &outband_reply,
                            &outband_reply_len);
    }
    if (st)
        return KRB5_KCM_RPC_ERROR;

    if (code) {
        ret = code;
        goto cleanup;
    }

    /* The reply could be in the in-band or out-of-band reply buffer. */
    reply = outband_reply_len ? outband_reply : inband_reply;
    reply_len = outband_reply_len ? outband_reply_len : inband_reply_len;
    copy = k5memdup(reply, reply_len, &ret);
    if (copy == NULL)
        goto cleanup;

    *reply_out = copy;
    *len_out = reply_len;

cleanup:
    if (outband_reply_len) {
        vm_deallocate(mach_task_self(), (vm_address_t)outband_reply,
                      outband_reply_len);
    }
    return ret;
}

/* Release any Mach RPC state within io. */
static void
kcmio_mach_close(struct kcmio *io)
{
    if (io->mport != MACH_PORT_NULL)
        mach_port_deallocate(mach_task_self(), io->mport);
}

#else /* __APPLE__ */

#define kcmio_mach_connect(context, io) EINVAL
#define kcmio_mach_call(context, io, data, len, reply_out, len_out) EINVAL
#define kcmio_mach_close(io)

#endif

/* Connect to the KCM daemon via a Unix domain socket. */
static krb5_error_code
kcmio_unix_socket_connect(krb5_context context, struct kcmio *io)
{
    krb5_error_code ret;
    SOCKET fd = INVALID_SOCKET;
    struct sockaddr_un addr;
    char *path = NULL;

    ret = profile_get_string(context->profile, KRB5_CONF_LIBDEFAULTS,
                             KRB5_CONF_KCM_SOCKET, NULL,
                             DEFAULT_KCM_SOCKET_PATH, &path);
    if (ret)
        goto cleanup;
    if (strcmp(path, "-") == 0) {
        ret = KRB5_KCM_NO_SERVER;
        goto cleanup;
    }

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == INVALID_SOCKET) {
        ret = SOCKET_ERRNO;
        goto cleanup;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
    if (SOCKET_CONNECT(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
        ret = (SOCKET_ERRNO == ENOENT) ? KRB5_KCM_NO_SERVER : SOCKET_ERRNO;
        goto cleanup;
    }

    io->fd = fd;
    fd = INVALID_SOCKET;

cleanup:
    if (fd != INVALID_SOCKET)
        closesocket(fd);
    profile_release_string(path);
    return ret;
}

/* Write a KCM request: 4-byte big-endian length, then the marshalled
 * request. */
static krb5_error_code
kcmio_unix_socket_write(krb5_context context, struct kcmio *io, void *request,
                        size_t len)
{
    char lenbytes[4];
    sg_buf sg[2];
    int ret;
    krb5_boolean reconnected = FALSE;

    SG_SET(&sg[0], lenbytes, sizeof(lenbytes));
    SG_SET(&sg[1], request, len);
    store_32_be(len, lenbytes);

    for (;;) {
        ret = krb5int_net_writev(context, io->fd, sg, 2);
        if (ret >= 0)
            return 0;
        ret = errno;
        if (ret != EPIPE || reconnected)
            return ret;

        /*
         * Try once to reconnect on an EPIPE, in case the server has an idle
         * timeout (like sssd does) and we went too long between ccache
         * operations. Reconnecting might also help if the server was
         * restarted for an upgrade--although the server must be designed to
         * always listen for connections on the socket during upgrades, or a
         * single reconnect attempt won't be robust.
         */
        close(io->fd);
        ret = kcmio_unix_socket_connect(context, io);
        if (ret)
            return ret;
        reconnected = TRUE;
    }
}

/* Read a KCM reply: 4-byte big-endian length, 4-byte big-endian status code,
 * then the marshalled reply. */
static krb5_error_code
kcmio_unix_socket_read(krb5_context context, struct kcmio *io,
                       void **reply_out, size_t *len_out)
{
    krb5_error_code code;
    char lenbytes[4], codebytes[4], *reply;
    size_t len;
    int st;

    *reply_out = NULL;
    *len_out = 0;

    st = krb5_net_read(context, io->fd, lenbytes, 4);
    if (st != 4)
        return (st == -1) ? errno : KRB5_CC_IO;
    len = load_32_be(lenbytes);
    if (len > MAX_REPLY_SIZE)
        return KRB5_KCM_REPLY_TOO_BIG;

    st = krb5_net_read(context, io->fd, codebytes, 4);
    if (st != 4)
        return (st == -1) ? errno : KRB5_CC_IO;
    code = load_32_be(codebytes);
    if (code != 0)
        return code;

    reply = malloc(len);
    if (reply == NULL)
        return ENOMEM;
    st = krb5_net_read(context, io->fd, reply, len);
    if (st == -1 || (size_t)st != len) {
        free(reply);
        return (st < 0) ? errno : KRB5_CC_IO;
    }

    *reply_out = reply;
    *len_out = len;
    return 0;
}

static krb5_error_code
kcmio_connect(krb5_context context, struct kcmio **io_out)
{
    krb5_error_code ret;
    struct kcmio *io;

    *io_out = NULL;
    io = calloc(1, sizeof(*io));
    if (io == NULL)
        return ENOMEM;
    io->fd = INVALID_SOCKET;

    /* Try Mach RPC (macOS only), then fall back to Unix domain sockets. */
    ret = kcmio_mach_connect(context, io);
    if (ret)
        ret = kcmio_unix_socket_connect(context, io);
    if (ret) {
        free(io);
        return ret;
    }

    *io_out = io;
    return 0;
}

/* Check req->reqbuf for an error condition and return it. Otherwise, send the
 * request to the KCM daemon and get a response. */
static krb5_error_code
kcmio_call(krb5_context context, struct kcmio *io, struct kcmreq *req)
{
    krb5_error_code ret;
    size_t reply_len = 0;

    if (k5_buf_status(&req->reqbuf) != 0)
        return ENOMEM;

    if (io->fd != INVALID_SOCKET) {
        ret = kcmio_unix_socket_write(context, io, req->reqbuf.data,
                                      req->reqbuf.len);
        if (ret)
            return ret;
        ret = kcmio_unix_socket_read(context, io, &req->reply_mem, &reply_len);
        if (ret)
            return ret;
    } else {
        /* We must be using Mach RPC. */
        ret = kcmio_mach_call(context, io, req->reqbuf.data, req->reqbuf.len,
                              &req->reply_mem, &reply_len);
        if (ret)
            return ret;
    }

    /* Read the status code from the marshalled reply. */
    k5_input_init(&req->reply, req->reply_mem, reply_len);
    ret = k5_input_get_uint32_be(&req->reply);
    return req->reply.status ? KRB5_KCM_MALFORMED_REPLY : ret;
}
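
/*
 * Note the deferred error handling above: k5buf marshalling calls record
 * allocation failures in the buffer itself, so callers can append all of a
 * request's arguments without checking each call and rely on the single
 * k5_buf_status() check in kcmio_call().
 */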

static void
kcmio_close(struct kcmio *io)
{
    if (io != NULL) {
        kcmio_mach_close(io);
        if (io->fd != INVALID_SOCKET)
            closesocket(io->fd);
        free(io);
    }
}

/* Fetch a zero-terminated name string from req->reply. The returned pointer
 * is an alias and must not be freed by the caller. */
static krb5_error_code
kcmreq_get_name(struct kcmreq *req, const char **name_out)
{
    const unsigned char *end;
    struct k5input *in = &req->reply;

    *name_out = NULL;
    end = memchr(in->ptr, '\0', in->len);
    if (end == NULL)
        return KRB5_KCM_MALFORMED_REPLY;
    *name_out = (const char *)in->ptr;
    (void)k5_input_get_bytes(in, end + 1 - in->ptr);
    return 0;
}

/* Fetch a UUID list from req->reply. UUID lists are not delimited, so we
 * consume the rest of the input. */
static krb5_error_code
kcmreq_get_uuid_list(struct kcmreq *req, struct uuid_list **uuids_out)
{
    struct uuid_list *uuids;

    *uuids_out = NULL;

    if (req->reply.len % KCM_UUID_LEN != 0)
        return KRB5_KCM_MALFORMED_REPLY;

    uuids = malloc(sizeof(*uuids));
    if (uuids == NULL)
        return ENOMEM;
    uuids->count = req->reply.len / KCM_UUID_LEN;
    uuids->pos = 0;

    if (req->reply.len > 0) {
        uuids->uuidbytes = malloc(req->reply.len);
        if (uuids->uuidbytes == NULL) {
            free(uuids);
            return ENOMEM;
        }
        memcpy(uuids->uuidbytes, req->reply.ptr, req->reply.len);
        (void)k5_input_get_bytes(&req->reply, req->reply.len);
    } else {
        uuids->uuidbytes = NULL;
    }

    *uuids_out = uuids;
    return 0;
}
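
/* A UUID list reply is just count * KCM_UUID_LEN raw bytes with no length
 * prefix, which is why the whole remaining input is consumed above and why a
 * length that is not a multiple of KCM_UUID_LEN is malformed. */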

static void
free_uuid_list(struct uuid_list *uuids)
{
    if (uuids != NULL)
        free(uuids->uuidbytes);
    free(uuids);
}

static void
free_cred_list(struct cred_list *list)
{
    size_t i;

    if (list == NULL)
        return;

    /* Creds are transferred to the caller as list->pos is incremented, so we
     * can start freeing there. */
    for (i = list->pos; i < list->count; i++)
        krb5_free_cred_contents(NULL, &list->creds[i]);
    free(list->creds);
    free(list);
}

/* Fetch a cred list from req->reply. */
static krb5_error_code
kcmreq_get_cred_list(struct kcmreq *req, struct cred_list **creds_out)
{
    struct cred_list *list;
    const unsigned char *data;
    krb5_error_code ret = 0;
    size_t count, len, i;

    *creds_out = NULL;

    /* Check a rough bound on the count to prevent very large allocations. */
    count = k5_input_get_uint32_be(&req->reply);
    if (count > req->reply.len / 4)
        return KRB5_KCM_MALFORMED_REPLY;

    list = malloc(sizeof(*list));
    if (list == NULL)
        return ENOMEM;

    list->creds = NULL;
    list->count = count;
    list->pos = 0;
    list->creds = k5calloc(count, sizeof(*list->creds), &ret);
    if (list->creds == NULL) {
        free(list);
        return ret;
    }

    for (i = 0; i < count; i++) {
        len = k5_input_get_uint32_be(&req->reply);
        data = k5_input_get_bytes(&req->reply, len);
        if (data == NULL)
            break;
        ret = k5_unmarshal_cred(data, len, 4, &list->creds[i]);
        if (ret)
            break;
    }
    if (i < count) {
        free_cred_list(list);
        return (ret == ENOMEM) ? ENOMEM : KRB5_KCM_MALFORMED_REPLY;
    }

    *creds_out = list;
    return 0;
}
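
/*
 * The cred list reply parsed above has the layout:
 *
 *     [count, 4 bytes big-endian]
 *     count * ([len, 4 bytes big-endian][marshalled cred, len bytes])
 *
 * Each entry holds at least a 4-byte length field, so a count greater than
 * remaining-length / 4 cannot be legitimate; that is the rough bound checked
 * before allocating.
 */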

static void
kcmreq_free(struct kcmreq *req)
{
    k5_buf_free(&req->reqbuf);
    free(req->reply_mem);
}

/* Create a krb5_ccache structure. If io is NULL, make a new connection for
 * the cache. Otherwise, always take ownership of io. */
static krb5_error_code
make_cache(krb5_context context, const char *residual, struct kcmio *io,
           krb5_ccache *cache_out)
{
    krb5_error_code ret;
    krb5_ccache cache = NULL;
    struct kcm_cache_data *data = NULL;
    char *residual_copy = NULL;

    *cache_out = NULL;

    if (io == NULL) {
        ret = kcmio_connect(context, &io);
        if (ret)
            return ret;
    }

    cache = malloc(sizeof(*cache));
    if (cache == NULL)
        goto oom;
    data = calloc(1, sizeof(*data));
    if (data == NULL)
        goto oom;
    residual_copy = strdup(residual);
    if (residual_copy == NULL)
        goto oom;
    if (k5_cc_mutex_init(&data->lock) != 0)
        goto oom;

    data->residual = residual_copy;
    data->io = io;
    cache->ops = &krb5_kcm_ops;
    cache->data = data;
    cache->magic = KV5M_CCACHE;
    *cache_out = cache;
    return 0;

oom:
    free(cache);
    free(data);
    free(residual_copy);
    kcmio_close(io);
    return ENOMEM;
}

/* Lock cache's I/O structure and use it to call the KCM daemon. */
static krb5_error_code
cache_call(krb5_context context, krb5_ccache cache, struct kcmreq *req)
{
    krb5_error_code ret;
    struct kcm_cache_data *data = cache->data;

    k5_cc_mutex_lock(context, &data->lock);
    ret = kcmio_call(context, data->io, req);
    k5_cc_mutex_unlock(context, &data->lock);
    return ret;
}

/* Try to propagate the KDC time offset from the cache to the krb5 context. */
static void
get_kdc_offset(krb5_context context, krb5_ccache cache)
{
    struct kcmreq req = EMPTY_KCMREQ;
    int32_t time_offset;

    kcmreq_init(&req, KCM_OP_GET_KDC_OFFSET, cache);
    if (cache_call(context, cache, &req) != 0)
        goto cleanup;
    time_offset = k5_input_get_uint32_be(&req.reply);
    if (req.reply.status)
        goto cleanup;
    context->os_context.time_offset = time_offset;
    context->os_context.usec_offset = 0;
    context->os_context.os_flags &= ~KRB5_OS_TOFFSET_TIME;
    context->os_context.os_flags |= KRB5_OS_TOFFSET_VALID;

cleanup:
    kcmreq_free(&req);
}

/* Try to propagate the KDC offset from the krb5 context to the cache. */
static void
set_kdc_offset(krb5_context context, krb5_ccache cache)
{
    struct kcmreq req;

    if (context->os_context.os_flags & KRB5_OS_TOFFSET_VALID) {
        kcmreq_init(&req, KCM_OP_SET_KDC_OFFSET, cache);
        k5_buf_add_uint32_be(&req.reqbuf, context->os_context.time_offset);
        (void)cache_call(context, cache, &req);
        kcmreq_free(&req);
    }
}

static const char *KRB5_CALLCONV
kcm_get_name(krb5_context context, krb5_ccache cache)
{
    return ((struct kcm_cache_data *)cache->data)->residual;
}

/* Fetch the primary name within the collection. The result is only valid for
 * the lifetime of req and should not be freed. */
static krb5_error_code
get_primary_name(krb5_context context, struct kcmreq *req, struct kcmio *io,
                 const char **name_out)
{
    krb5_error_code ret;

    *name_out = NULL;

    kcmreq_init(req, KCM_OP_GET_DEFAULT_CACHE, NULL);
    ret = kcmio_call(context, io, req);
    if (ret)
        return ret;
    return kcmreq_get_name(req, name_out);
}

static krb5_error_code KRB5_CALLCONV
kcm_resolve(krb5_context context, krb5_ccache *cache_out, const char *residual)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    struct kcmio *io = NULL;
    const char *defname = NULL;

    *cache_out = NULL;

    ret = kcmio_connect(context, &io);
    if (ret)
        goto cleanup;

    if (*residual == '\0') {
        ret = get_primary_name(context, &req, io, &defname);
        if (ret)
            goto cleanup;
        residual = defname;
    }

    ret = make_cache(context, residual, io, cache_out);
    io = NULL;

cleanup:
    kcmio_close(io);
    kcmreq_free(&req);
    return ret;
}

krb5_error_code
k5_kcm_primary_name(krb5_context context, char **name_out)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    struct kcmio *io = NULL;
    const char *name;

    *name_out = NULL;

    ret = kcmio_connect(context, &io);
    if (ret)
        goto cleanup;
    ret = get_primary_name(context, &req, io, &name);
    if (ret)
        goto cleanup;
    *name_out = strdup(name);
    ret = (*name_out == NULL) ? ENOMEM : 0;

cleanup:
    kcmio_close(io);
    kcmreq_free(&req);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_gen_new(krb5_context context, krb5_ccache *cache_out)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    struct kcmio *io = NULL;
    const char *name;

    *cache_out = NULL;

    ret = kcmio_connect(context, &io);
    if (ret)
        goto cleanup;
    kcmreq_init(&req, KCM_OP_GEN_NEW, NULL);
    ret = kcmio_call(context, io, &req);
    if (ret)
        goto cleanup;
    ret = kcmreq_get_name(&req, &name);
    if (ret)
        goto cleanup;
    ret = make_cache(context, name, io, cache_out);
    io = NULL;

cleanup:
    kcmreq_free(&req);
    kcmio_close(io);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_initialize(krb5_context context, krb5_ccache cache, krb5_principal princ)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_INITIALIZE, cache);
    k5_marshal_princ(&req.reqbuf, 4, princ);
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);
    set_kdc_offset(context, cache);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_close(krb5_context context, krb5_ccache cache)
{
    struct kcm_cache_data *data = cache->data;

    k5_cc_mutex_destroy(&data->lock);
    kcmio_close(data->io);
    free(data->residual);
    free(data);
    free(cache);
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_destroy(krb5_context context, krb5_ccache cache)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_DESTROY, cache);
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);
    (void)kcm_close(context, cache);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_store(krb5_context context, krb5_ccache cache, krb5_creds *cred)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_STORE, cache);
    k5_marshal_cred(&req.reqbuf, 4, cred);
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_retrieve(krb5_context context, krb5_ccache cache, krb5_flags flags,
             krb5_creds *mcred, krb5_creds *cred_out)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    krb5_creds cred;
    krb5_enctype *enctypes = NULL;

    memset(&cred, 0, sizeof(cred));

    /* Include KCM_GC_CACHED in flags to prevent the Heimdal KCM server from
     * making a TGS request itself. */
    kcmreq_init(&req, KCM_OP_RETRIEVE, cache);
    k5_buf_add_uint32_be(&req.reqbuf, map_tcflags(flags) | KCM_GC_CACHED);
    k5_marshal_mcred(&req.reqbuf, mcred);
    ret = cache_call(context, cache, &req);

    /* Fall back to iteration if the server does not support retrieval. */
    if (unsupported_op_error(ret)) {
        ret = k5_cc_retrieve_cred_default(context, cache, flags, mcred,
                                          cred_out);
        goto cleanup;
    }
    if (ret)
        goto cleanup;

    ret = k5_unmarshal_cred(req.reply.ptr, req.reply.len, 4, &cred);
    if (ret)
        goto cleanup;

    /* In rare cases we might retrieve a credential with a session key this
     * context can't support, in which case we must retry using iteration. */
    if (flags & KRB5_TC_SUPPORTED_KTYPES) {
        ret = krb5_get_tgs_ktypes(context, cred.server, &enctypes);
        if (ret)
            goto cleanup;
        if (!k5_etypes_contains(enctypes, cred.keyblock.enctype)) {
            ret = k5_cc_retrieve_cred_default(context, cache, flags, mcred,
                                              cred_out);
            goto cleanup;
        }
    }

    *cred_out = cred;
    memset(&cred, 0, sizeof(cred));

cleanup:
    kcmreq_free(&req);
    krb5_free_cred_contents(context, &cred);
    free(enctypes);
    /* Heimdal's KCM returns KRB5_CC_END if no cred is found. */
    return (ret == KRB5_CC_END) ? KRB5_CC_NOTFOUND : map_invalid(ret);
}

static krb5_error_code KRB5_CALLCONV
kcm_get_princ(krb5_context context, krb5_ccache cache,
              krb5_principal *princ_out)
{
    krb5_error_code ret;
    struct kcmreq req;
    struct kcm_cache_data *data = cache->data;

    kcmreq_init(&req, KCM_OP_GET_PRINCIPAL, cache);
    ret = cache_call(context, cache, &req);
    /* Heimdal KCM can respond with code 0 and no principal. */
    if (!ret && req.reply.len == 0)
        ret = KRB5_FCC_NOFILE;
    if (ret == KRB5_FCC_NOFILE) {
        k5_setmsg(context, ret, _("Credentials cache 'KCM:%s' not found"),
                  data->residual);
    }

    if (!ret)
        ret = k5_unmarshal_princ(req.reply.ptr, req.reply.len, 4, princ_out);
    kcmreq_free(&req);
    return map_invalid(ret);
}

static krb5_error_code KRB5_CALLCONV
kcm_start_seq_get(krb5_context context, krb5_ccache cache,
                  krb5_cc_cursor *cursor_out)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    struct uuid_list *uuids = NULL;
    struct cred_list *creds = NULL;
    struct kcm_cursor *cursor;

    *cursor_out = NULL;

    get_kdc_offset(context, cache);

    kcmreq_init(&req, KCM_OP_GET_CRED_LIST, cache);
    ret = cache_call(context, cache, &req);
    if (ret == 0) {
        /* GET_CRED_LIST is available. */
        ret = kcmreq_get_cred_list(&req, &creds);
        if (ret)
            goto cleanup;
    } else if (unsupported_op_error(ret)) {
        /* Fall back to GET_CRED_UUID_LIST. */
        kcmreq_free(&req);
        kcmreq_init(&req, KCM_OP_GET_CRED_UUID_LIST, cache);
        ret = cache_call(context, cache, &req);
        if (ret)
            goto cleanup;
        ret = kcmreq_get_uuid_list(&req, &uuids);
        if (ret)
            goto cleanup;
    } else {
        goto cleanup;
    }

    cursor = k5alloc(sizeof(*cursor), &ret);
    if (cursor == NULL)
        goto cleanup;
    cursor->uuids = uuids;
    cursor->creds = creds;
    *cursor_out = (krb5_cc_cursor)cursor;

cleanup:
    kcmreq_free(&req);
    return ret;
}

static krb5_error_code
next_cred_by_uuid(krb5_context context, krb5_ccache cache,
                  struct uuid_list *uuids, krb5_creds *cred_out)
{
    krb5_error_code ret;
    struct kcmreq req;

    memset(cred_out, 0, sizeof(*cred_out));

    if (uuids->pos >= uuids->count)
        return KRB5_CC_END;

    kcmreq_init(&req, KCM_OP_GET_CRED_BY_UUID, cache);
    k5_buf_add_len(&req.reqbuf, uuids->uuidbytes + (uuids->pos * KCM_UUID_LEN),
                   KCM_UUID_LEN);
    uuids->pos++;
    ret = cache_call(context, cache, &req);
    if (!ret)
        ret = k5_unmarshal_cred(req.reply.ptr, req.reply.len, 4, cred_out);
    kcmreq_free(&req);
    return map_invalid(ret);
}

static krb5_error_code KRB5_CALLCONV
kcm_next_cred(krb5_context context, krb5_ccache cache, krb5_cc_cursor *cursor,
              krb5_creds *cred_out)
{
    struct kcm_cursor *c = (struct kcm_cursor *)*cursor;
    struct cred_list *list;

    if (c->uuids != NULL)
        return next_cred_by_uuid(context, cache, c->uuids, cred_out);

    list = c->creds;
    if (list->pos >= list->count)
        return KRB5_CC_END;

    /* Transfer memory ownership of one cred to the caller. */
    *cred_out = list->creds[list->pos];
    memset(&list->creds[list->pos], 0, sizeof(*list->creds));
    list->pos++;

    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_end_seq_get(krb5_context context, krb5_ccache cache,
                krb5_cc_cursor *cursor)
{
    struct kcm_cursor *c = *cursor;

    if (c == NULL)
        return 0;
    free_uuid_list(c->uuids);
    free_cred_list(c->creds);
    free(c);
    *cursor = NULL;
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_remove_cred(krb5_context context, krb5_ccache cache, krb5_flags flags,
                krb5_creds *mcred)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_REMOVE_CRED, cache);
    k5_buf_add_uint32_be(&req.reqbuf, map_tcflags(flags));
    k5_marshal_mcred(&req.reqbuf, mcred);
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_set_flags(krb5_context context, krb5_ccache cache, krb5_flags flags)
{
    /* We don't currently care about any flags for this type. */
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_get_flags(krb5_context context, krb5_ccache cache, krb5_flags *flags_out)
{
    /* We don't currently have any operational flags for this type. */
    *flags_out = 0;
    return 0;
}

/* Construct a per-type cursor, always taking ownership of io and uuids. */
static krb5_error_code
make_ptcursor(const char *residual, struct uuid_list *uuids, struct kcmio *io,
              krb5_cc_ptcursor *cursor_out)
{
    krb5_cc_ptcursor cursor = NULL;
    struct kcm_ptcursor *data = NULL;
    char *residual_copy = NULL;

    *cursor_out = NULL;

    if (residual != NULL) {
        residual_copy = strdup(residual);
        if (residual_copy == NULL)
            goto oom;
    }
    cursor = malloc(sizeof(*cursor));
    if (cursor == NULL)
        goto oom;
    data = malloc(sizeof(*data));
    if (data == NULL)
        goto oom;

    data->residual = residual_copy;
    data->uuids = uuids;
    data->io = io;
    data->first = TRUE;
    cursor->ops = &krb5_kcm_ops;
    cursor->data = data;
    *cursor_out = cursor;
    return 0;

oom:
    kcmio_close(io);
    free_uuid_list(uuids);
    free(residual_copy);
    free(data);
    free(cursor);
    return ENOMEM;
}

static krb5_error_code KRB5_CALLCONV
kcm_ptcursor_new(krb5_context context, krb5_cc_ptcursor *cursor_out)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    struct kcmio *io = NULL;
    struct uuid_list *uuids = NULL;
    const char *defname, *primary;

    *cursor_out = NULL;

    /* Don't try to use KCM for the cache collection unless the default cache
     * name has the KCM type. */
    defname = krb5_cc_default_name(context);
    if (defname == NULL || strncmp(defname, "KCM:", 4) != 0)
        return make_ptcursor(NULL, NULL, NULL, cursor_out);

    ret = kcmio_connect(context, &io);
    if (ret)
        return ret;

    /* If defname is a subsidiary cache, return a singleton cursor. */
    if (strlen(defname) > 4)
        return make_ptcursor(defname + 4, NULL, io, cursor_out);

    kcmreq_init(&req, KCM_OP_GET_CACHE_UUID_LIST, NULL);
    ret = kcmio_call(context, io, &req);
    if (ret == KRB5_FCC_NOFILE) {
        /* There are no accessible caches; return an empty cursor. */
        ret = make_ptcursor(NULL, NULL, NULL, cursor_out);
        goto cleanup;
    }
    if (ret)
        goto cleanup;
    ret = kcmreq_get_uuid_list(&req, &uuids);
    if (ret)
        goto cleanup;

    kcmreq_free(&req);
    kcmreq_init(&req, KCM_OP_GET_DEFAULT_CACHE, NULL);
    ret = kcmio_call(context, io, &req);
    if (ret)
        goto cleanup;
    ret = kcmreq_get_name(&req, &primary);
    if (ret)
        goto cleanup;

    ret = make_ptcursor(primary, uuids, io, cursor_out);
    uuids = NULL;
    io = NULL;

cleanup:
    free_uuid_list(uuids);
    kcmio_close(io);
    kcmreq_free(&req);
    return ret;
}

/* Return true if name is an initialized cache. */
static krb5_boolean
name_exists(krb5_context context, struct kcmio *io, const char *name)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_GET_PRINCIPAL, NULL);
    k5_buf_add_len(&req.reqbuf, name, strlen(name) + 1);
    ret = kcmio_call(context, io, &req);
    kcmreq_free(&req);
    return ret == 0;
}

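/*
 * Iteration order for the cursor produced by kcm_ptcursor_new(): the primary
 * (or singleton subsidiary) cache is yielded first if it exists, then each
 * cache in the uuid list except the primary, skipping any cache deleted
 * since the list was obtained.
 */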
static krb5_error_code KRB5_CALLCONV
kcm_ptcursor_next(krb5_context context, krb5_cc_ptcursor cursor,
                  krb5_ccache *cache_out)
{
    krb5_error_code ret = 0;
    struct kcmreq req = EMPTY_KCMREQ;
    struct kcm_ptcursor *data = cursor->data;
    struct uuid_list *uuids;
    const unsigned char *id;
    const char *name;

    *cache_out = NULL;

    /* Return the primary or specified subsidiary cache if we haven't yet. */
    if (data->first && data->residual != NULL) {
        data->first = FALSE;
        if (name_exists(context, data->io, data->residual))
            return make_cache(context, data->residual, NULL, cache_out);
    }

    uuids = data->uuids;
    if (uuids == NULL)
        return 0;

    while (uuids->pos < uuids->count) {
        /* Get the name of the next cache. */
        id = &uuids->uuidbytes[KCM_UUID_LEN * uuids->pos++];
        kcmreq_free(&req);
        kcmreq_init(&req, KCM_OP_GET_CACHE_BY_UUID, NULL);
        k5_buf_add_len(&req.reqbuf, id, KCM_UUID_LEN);
        ret = kcmio_call(context, data->io, &req);
        /* Continue if the cache has been deleted. */
        if (ret == KRB5_CC_END || ret == KRB5_FCC_NOFILE) {
            ret = 0;
            continue;
        }
        if (ret)
            goto cleanup;
        ret = kcmreq_get_name(&req, &name);
        if (ret)
            goto cleanup;

        /* Don't yield the primary cache twice. */
        if (strcmp(name, data->residual) == 0)
            continue;

        ret = make_cache(context, name, NULL, cache_out);
        break;
    }

cleanup:
    kcmreq_free(&req);
    return ret;
}

static krb5_error_code KRB5_CALLCONV
kcm_ptcursor_free(krb5_context context, krb5_cc_ptcursor *cursor)
{
    struct kcm_ptcursor *data = (*cursor)->data;

    free(data->residual);
    free_uuid_list(data->uuids);
    kcmio_close(data->io);
    free(data);
    free(*cursor);
    *cursor = NULL;
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_replace(krb5_context context, krb5_ccache cache, krb5_principal princ,
            krb5_creds **creds)
{
    krb5_error_code ret;
    struct kcmreq req = EMPTY_KCMREQ;
    size_t pos;
    uint8_t *lenptr;
    int ncreds, i;
    krb5_os_context octx = &context->os_context;
    int32_t offset;

    kcmreq_init(&req, KCM_OP_REPLACE, cache);
    offset = (octx->os_flags & KRB5_OS_TOFFSET_VALID) ? octx->time_offset : 0;
    k5_buf_add_uint32_be(&req.reqbuf, offset);
    k5_marshal_princ(&req.reqbuf, 4, princ);
    for (ncreds = 0; creds[ncreds] != NULL; ncreds++);
    k5_buf_add_uint32_be(&req.reqbuf, ncreds);
    for (i = 0; creds[i] != NULL; i++) {
        /* Store a dummy length, then fix it up after marshalling the cred. */
        pos = req.reqbuf.len;
        k5_buf_add_uint32_be(&req.reqbuf, 0);
        k5_marshal_cred(&req.reqbuf, 4, creds[i]);
        if (k5_buf_status(&req.reqbuf) == 0) {
            lenptr = (uint8_t *)req.reqbuf.data + pos;
            store_32_be(req.reqbuf.len - (pos + 4), lenptr);
        }
    }
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);

    if (unsupported_op_error(ret))
        return k5_nonatomic_replace(context, cache, princ, creds);

    return ret;
}
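
/*
 * The KCM_OP_REPLACE request body built above, following the header, is:
 *
 *     [kdc time offset, 4 bytes big-endian]
 *     [marshalled principal]
 *     [cred count, 4 bytes big-endian]
 *     count * ([len, 4 bytes big-endian][marshalled cred, len bytes])
 *
 * The length of each cred is not known until it has been marshalled, hence
 * the dummy length written first and patched afterward.
 */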

static krb5_error_code KRB5_CALLCONV
kcm_lock(krb5_context context, krb5_ccache cache)
{
    k5_cc_mutex_lock(context, &((struct kcm_cache_data *)cache->data)->lock);
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_unlock(krb5_context context, krb5_ccache cache)
{
    k5_cc_mutex_unlock(context, &((struct kcm_cache_data *)cache->data)->lock);
    return 0;
}

static krb5_error_code KRB5_CALLCONV
kcm_switch_to(krb5_context context, krb5_ccache cache)
{
    krb5_error_code ret;
    struct kcmreq req;

    kcmreq_init(&req, KCM_OP_SET_DEFAULT_CACHE, cache);
    ret = cache_call(context, cache, &req);
    kcmreq_free(&req);
    return ret;
}

const krb5_cc_ops krb5_kcm_ops = {
    0,
    "KCM",
    kcm_get_name,
    kcm_resolve,
    kcm_gen_new,
    kcm_initialize,
    kcm_destroy,
    kcm_close,
    kcm_store,
    kcm_retrieve,
    kcm_get_princ,
    kcm_start_seq_get,
    kcm_next_cred,
    kcm_end_seq_get,
    kcm_remove_cred,
    kcm_set_flags,
    kcm_get_flags,
    kcm_ptcursor_new,
    kcm_ptcursor_next,
    kcm_ptcursor_free,
    kcm_replace,
    NULL, /* wasdefault */
    kcm_lock,
    kcm_unlock,
    kcm_switch_to,
};

#endif /* not _WIN32 */