1 /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /* lib/krb5/ccache/ccbase.c - Registration functions for ccache */
3 /*
4 * Copyright 1990,2004,2008 by the Massachusetts Institute of Technology.
5 * All Rights Reserved.
6 *
7 * Export of this software from the United States of America may
8 * require a specific license from the United States Government.
9 * It is the responsibility of any person or organization contemplating
10 * export to obtain such a license before exporting.
11 *
12 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
13 * distribute this software and its documentation for any purpose and
14 * without fee is hereby granted, provided that the above copyright
15 * notice appear in all copies and that both that copyright notice and
16 * this permission notice appear in supporting documentation, and that
17 * the name of M.I.T. not be used in advertising or publicity pertaining
18 * to distribution of the software without specific, written prior
19 * permission. Furthermore if you modify this software you must label
20 * your software as modified software and not distribute it in such a
21 * fashion that it might be confused with the original M.I.T. software.
22 * M.I.T. makes no representations about the suitability of
23 * this software for any purpose. It is provided "as is" without express
24 * or implied warranty.
25 */
26
27 #include "k5-int.h"
28 #include "k5-thread.h"
29
30 #include "fcc.h"
31 #include "cc-int.h"
32
/* Node in the singly-linked list of registered ccache types. */
struct krb5_cc_typelist {
    const krb5_cc_ops *ops;
    struct krb5_cc_typelist *next;
};

/* Iterator state over the registered type list; see
 * krb5int_cc_typecursor_new/next/free below. */
struct krb5_cc_typecursor {
    struct krb5_cc_typelist *tptr;
};
/* typedef krb5_cc_typecursor in k5-int.h */
42
extern const krb5_cc_ops krb5_mcc_ops;

/*
 * Build the initial list of ccache types from statically allocated entries.
 * Each entry's next pointer is the NEXT macro, which is redefined after
 * every entry to point at that entry; INITIAL_TYPEHEAD therefore ends up
 * referring to the last entry defined, which is the head of the list.
 */
#define NEXT NULL

#ifdef _WIN32
extern const krb5_cc_ops krb5_lcc_ops;
static struct krb5_cc_typelist cc_lcc_entry = { &krb5_lcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_lcc_entry
#endif

#ifdef USE_CCAPI
extern const krb5_cc_ops krb5_cc_stdcc_ops;
static struct krb5_cc_typelist cc_stdcc_entry = { &krb5_cc_stdcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_stdcc_entry
#endif

static struct krb5_cc_typelist cc_mcc_entry = { &krb5_mcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_mcc_entry

#ifndef NO_FILE_CCACHE
static struct krb5_cc_typelist cc_fcc_entry = { &krb5_cc_file_ops, NEXT };
#undef NEXT
#define NEXT &cc_fcc_entry
#endif

#ifdef USE_KEYRING_CCACHE
extern const krb5_cc_ops krb5_krcc_ops;
static struct krb5_cc_typelist cc_krcc_entry = { &krb5_krcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_krcc_entry
#endif /* USE_KEYRING_CCACHE */

#ifndef _WIN32
extern const krb5_cc_ops krb5_dcc_ops;
static struct krb5_cc_typelist cc_dcc_entry = { &krb5_dcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_dcc_entry

extern const krb5_cc_ops krb5_kcm_ops;
static struct krb5_cc_typelist cc_kcm_entry = { &krb5_kcm_ops, NEXT };
#undef NEXT
#define NEXT &cc_kcm_entry
#endif /* not _WIN32 */

#ifdef USE_CCAPI_MACOS
extern const krb5_cc_ops krb5_api_macos_ops;
static struct krb5_cc_typelist cc_macos_entry = { &krb5_api_macos_ops, NEXT };
#undef NEXT
#define NEXT &cc_macos_entry
#endif /* USE_CCAPI_MACOS */

#define INITIAL_TYPEHEAD (NEXT)
/* Head of the type list.  Entries before INITIAL_TYPEHEAD are heap-allocated
 * by krb5_cc_register() and freed by krb5int_cc_finalize(); the rest are the
 * static entries above.  Protected by cc_typelist_lock. */
static struct krb5_cc_typelist *cc_typehead = INITIAL_TYPEHEAD;
static k5_mutex_t cc_typelist_lock = K5_MUTEX_PARTIAL_INITIALIZER;

/* mutex for krb5_cccol_[un]lock */
static k5_cc_mutex cccol_lock = K5_CC_MUTEX_PARTIAL_INITIALIZER;

static krb5_error_code
krb5int_cc_getops(krb5_context, const char *, const krb5_cc_ops **);
106
107 int
krb5int_cc_initialize(void)108 krb5int_cc_initialize(void)
109 {
110 int err;
111
112 err = k5_cc_mutex_finish_init(&cccol_lock);
113 if (err)
114 return err;
115 err = k5_cc_mutex_finish_init(&krb5int_mcc_mutex);
116 if (err)
117 return err;
118 err = k5_mutex_finish_init(&cc_typelist_lock);
119 if (err)
120 return err;
121 #ifndef NO_FILE_CCACHE
122 err = k5_cc_mutex_finish_init(&krb5int_cc_file_mutex);
123 if (err)
124 return err;
125 #endif
126 #ifdef USE_KEYRING_CCACHE
127 err = k5_cc_mutex_finish_init(&krb5int_krcc_mutex);
128 if (err)
129 return err;
130 #endif
131 return 0;
132 }
133
134 void
krb5int_cc_finalize(void)135 krb5int_cc_finalize(void)
136 {
137 struct krb5_cc_typelist *t, *t_next;
138 k5_cccol_force_unlock();
139 k5_cc_mutex_destroy(&cccol_lock);
140 k5_mutex_destroy(&cc_typelist_lock);
141 #ifndef NO_FILE_CCACHE
142 k5_cc_mutex_destroy(&krb5int_cc_file_mutex);
143 #endif
144 k5_cc_mutex_destroy(&krb5int_mcc_mutex);
145 #ifdef USE_KEYRING_CCACHE
146 k5_cc_mutex_destroy(&krb5int_krcc_mutex);
147 #endif
148 for (t = cc_typehead; t != INITIAL_TYPEHEAD; t = t_next) {
149 t_next = t->next;
150 free(t);
151 }
152 }
153
154
155 /*
156 * Register a new credentials cache type
157 * If override is set, replace any existing ccache with that type tag
158 */
159
160 krb5_error_code KRB5_CALLCONV
krb5_cc_register(krb5_context context,const krb5_cc_ops * ops,krb5_boolean override)161 krb5_cc_register(krb5_context context, const krb5_cc_ops *ops,
162 krb5_boolean override)
163 {
164 struct krb5_cc_typelist *t;
165
166 k5_mutex_lock(&cc_typelist_lock);
167 for (t = cc_typehead;t && strcmp(t->ops->prefix,ops->prefix);t = t->next)
168 ;
169 if (t) {
170 if (override) {
171 t->ops = ops;
172 k5_mutex_unlock(&cc_typelist_lock);
173 return 0;
174 } else {
175 k5_mutex_unlock(&cc_typelist_lock);
176 return KRB5_CC_TYPE_EXISTS;
177 }
178 }
179 if (!(t = (struct krb5_cc_typelist *) malloc(sizeof(*t)))) {
180 k5_mutex_unlock(&cc_typelist_lock);
181 return ENOMEM;
182 }
183 t->next = cc_typehead;
184 t->ops = ops;
185 cc_typehead = t;
186 k5_mutex_unlock(&cc_typelist_lock);
187 return 0;
188 }
189
190 /*
191 * Resolve a credential cache name into a cred. cache object.
192 *
193 * The name is currently constrained to be of the form "type:residual";
194 *
195 * The "type" portion corresponds to one of the predefined credential
196 * cache types, while the "residual" portion is specific to the
197 * particular cache type.
198 */
199
200 #include <ctype.h>
201 krb5_error_code KRB5_CALLCONV
krb5_cc_resolve(krb5_context context,const char * name,krb5_ccache * cache)202 krb5_cc_resolve (krb5_context context, const char *name, krb5_ccache *cache)
203 {
204 char *pfx, *cp;
205 const char *resid;
206 unsigned int pfxlen;
207 krb5_error_code err;
208 const krb5_cc_ops *ops;
209
210 if (name == NULL)
211 return KRB5_CC_BADNAME;
212 pfx = NULL;
213 cp = strchr (name, ':');
214 if (!cp) {
215 if (krb5_cc_dfl_ops)
216 return (*krb5_cc_dfl_ops->resolve)(context, cache, name);
217 else
218 return KRB5_CC_BADNAME;
219 }
220
221 pfxlen = cp - name;
222
223 if ( pfxlen == 1 && isalpha((unsigned char) name[0]) ) {
224 /* We found a drive letter not a prefix - use FILE */
225 pfx = strdup("FILE");
226 if (!pfx)
227 return ENOMEM;
228
229 resid = name;
230 } else {
231 resid = name + pfxlen + 1;
232 pfx = k5memdup0(name, pfxlen, &err);
233 if (pfx == NULL)
234 return err;
235 }
236
237 *cache = (krb5_ccache) 0;
238
239 err = krb5int_cc_getops(context, pfx, &ops);
240 if (pfx != NULL)
241 free(pfx);
242 if (err)
243 return err;
244
245 return ops->resolve(context, cache, resid);
246 }
247
248 krb5_error_code KRB5_CALLCONV
krb5_cc_dup(krb5_context context,krb5_ccache in,krb5_ccache * out)249 krb5_cc_dup(krb5_context context, krb5_ccache in, krb5_ccache *out)
250 {
251 return in->ops->resolve(context, out, in->ops->get_name(context, in));
252 }
253
254 /*
255 * cc_getops
256 *
257 * Internal function to return the ops vector for a given ccache
258 * prefix string.
259 */
260 static krb5_error_code
krb5int_cc_getops(krb5_context context,const char * pfx,const krb5_cc_ops ** ops)261 krb5int_cc_getops(krb5_context context,
262 const char *pfx,
263 const krb5_cc_ops **ops)
264 {
265 struct krb5_cc_typelist *tlist;
266
267 k5_mutex_lock(&cc_typelist_lock);
268 for (tlist = cc_typehead; tlist; tlist = tlist->next) {
269 if (strcmp (tlist->ops->prefix, pfx) == 0) {
270 *ops = tlist->ops;
271 k5_mutex_unlock(&cc_typelist_lock);
272 return 0;
273 }
274 }
275 k5_mutex_unlock(&cc_typelist_lock);
276 if (krb5_cc_dfl_ops && !strcmp (pfx, krb5_cc_dfl_ops->prefix)) {
277 *ops = krb5_cc_dfl_ops;
278 return 0;
279 }
280 return KRB5_CC_UNKNOWN_TYPE;
281 }
282
283 /*
284 * cc_new_unique
285 *
286 * Generate a new unique ccache, given a ccache type and a hint
287 * string. Ignores the hint string for now.
288 */
289 krb5_error_code KRB5_CALLCONV
krb5_cc_new_unique(krb5_context context,const char * type,const char * hint,krb5_ccache * id)290 krb5_cc_new_unique(
291 krb5_context context,
292 const char *type,
293 const char *hint,
294 krb5_ccache *id)
295 {
296 const krb5_cc_ops *ops;
297 krb5_error_code err;
298
299 *id = NULL;
300
301 TRACE_CC_NEW_UNIQUE(context, type);
302 err = krb5int_cc_getops(context, type, &ops);
303 if (err)
304 return err;
305
306 return ops->gen_new(context, id);
307 }
308
309 /*
310 * cc_typecursor
311 *
312 * Note: to avoid copying the typelist at cursor creation time, among
313 * other things, we assume that the only additions ever occur to the
314 * typelist.
315 */
316 krb5_error_code
krb5int_cc_typecursor_new(krb5_context context,krb5_cc_typecursor * t)317 krb5int_cc_typecursor_new(krb5_context context, krb5_cc_typecursor *t)
318 {
319 krb5_cc_typecursor n = NULL;
320
321 *t = NULL;
322 n = malloc(sizeof(*n));
323 if (n == NULL)
324 return ENOMEM;
325
326 k5_mutex_lock(&cc_typelist_lock);
327 n->tptr = cc_typehead;
328 k5_mutex_unlock(&cc_typelist_lock);
329 *t = n;
330 return 0;
331 }
332
333 krb5_error_code
krb5int_cc_typecursor_next(krb5_context context,krb5_cc_typecursor t,const krb5_cc_ops ** ops)334 krb5int_cc_typecursor_next(krb5_context context,
335 krb5_cc_typecursor t,
336 const krb5_cc_ops **ops)
337 {
338 *ops = NULL;
339 if (t->tptr == NULL)
340 return 0;
341
342 k5_mutex_lock(&cc_typelist_lock);
343 *ops = t->tptr->ops;
344 t->tptr = t->tptr->next;
345 k5_mutex_unlock(&cc_typelist_lock);
346 return 0;
347 }
348
349 krb5_error_code
krb5int_cc_typecursor_free(krb5_context context,krb5_cc_typecursor * t)350 krb5int_cc_typecursor_free(krb5_context context, krb5_cc_typecursor *t)
351 {
352 free(*t);
353 *t = NULL;
354 return 0;
355 }
356
357 krb5_error_code
k5_nonatomic_replace(krb5_context context,krb5_ccache ccache,krb5_principal princ,krb5_creds ** creds)358 k5_nonatomic_replace(krb5_context context, krb5_ccache ccache,
359 krb5_principal princ, krb5_creds **creds)
360 {
361 krb5_error_code ret;
362 int i;
363
364 ret = krb5_cc_initialize(context, ccache, princ);
365 for (i = 0; !ret && creds[i] != NULL; creds++)
366 ret = krb5_cc_store_cred(context, ccache, creds[i]);
367 return ret;
368 }
369
/*
 * Read every credential from ccache into a newly allocated, NULL-terminated
 * array.  On success, *creds_out owns the array and its elements; release it
 * with krb5_free_tgt_creds().  On failure, *creds_out is NULL and any
 * partial results are freed here.
 */
static krb5_error_code
read_creds(krb5_context context, krb5_ccache ccache, krb5_creds ***creds_out)
{
    krb5_error_code ret;
    krb5_cc_cursor cur = NULL;
    krb5_creds **list = NULL, *cred = NULL, **newptr;
    int i;

    *creds_out = NULL;

    ret = krb5_cc_start_seq_get(context, ccache, &cur);
    if (ret)
        goto cleanup;

    /* Allocate one extra entry so that list remains valid for freeing after
     * we add the next entry and before we reallocate it. */
    list = k5calloc(2, sizeof(*list), &ret);
    if (list == NULL)
        goto cleanup;

    i = 0;
    for (;;) {
        cred = k5alloc(sizeof(*cred), &ret);
        if (cred == NULL)
            goto cleanup;
        ret = krb5_cc_next_cred(context, ccache, &cur, cred);
        if (ret == KRB5_CC_END)
            break;
        if (ret)
            goto cleanup;
        /* Keep the array NULL-terminated after every append so the cleanup
         * path can always free it with krb5_free_tgt_creds(). */
        list[i++] = cred;
        list[i] = NULL;
        cred = NULL;

        /* Grow to hold the next cred plus its terminator slot. */
        newptr = realloc(list, (i + 2) * sizeof(*list));
        if (newptr == NULL) {
            ret = ENOMEM;
            goto cleanup;
        }
        list = newptr;
        list[i + 1] = NULL;
    }
    ret = 0;

    /* Transfer ownership of the completed array to the caller. */
    *creds_out = list;
    list = NULL;

cleanup:
    if (cur != NULL)
        (void)krb5_cc_end_seq_get(context, ccache, &cur);
    krb5_free_tgt_creds(context, list);
    /* cred is either NULL or an empty allocation at this point (on the
     * KRB5_CC_END/error paths), so a plain free() suffices. */
    free(cred);
    return ret;
}
424
425 krb5_error_code KRB5_CALLCONV
krb5_cc_move(krb5_context context,krb5_ccache src,krb5_ccache dst)426 krb5_cc_move(krb5_context context, krb5_ccache src, krb5_ccache dst)
427 {
428 krb5_error_code ret;
429 krb5_principal princ = NULL;
430 krb5_creds **creds = NULL;
431
432 TRACE_CC_MOVE(context, src, dst);
433
434 ret = krb5_cc_get_principal(context, src, &princ);
435 if (ret)
436 goto cleanup;
437
438 ret = read_creds(context, src, &creds);
439 if (ret)
440 goto cleanup;
441
442 if (dst->ops->replace == NULL)
443 ret = k5_nonatomic_replace(context, dst, princ, creds);
444 else
445 ret = dst->ops->replace(context, dst, princ, creds);
446 if (ret)
447 goto cleanup;
448
449 ret = krb5_cc_destroy(context, src);
450
451 cleanup:
452 krb5_free_principal(context, princ);
453 krb5_free_tgt_creds(context, creds);
454 return ret;
455 }
456
457 krb5_boolean KRB5_CALLCONV
krb5_cc_support_switch(krb5_context context,const char * type)458 krb5_cc_support_switch(krb5_context context, const char *type)
459 {
460 const krb5_cc_ops *ops;
461 krb5_error_code err;
462
463 err = krb5int_cc_getops(context, type, &ops);
464 return (err ? FALSE : (ops->switch_to != NULL));
465 }
466
467 krb5_error_code
k5_cc_mutex_init(k5_cc_mutex * m)468 k5_cc_mutex_init(k5_cc_mutex *m)
469 {
470 krb5_error_code ret = 0;
471
472 ret = k5_mutex_init(&m->lock);
473 if (ret) return ret;
474 m->owner = NULL;
475 m->refcount = 0;
476
477 return ret;
478 }
479
480 krb5_error_code
k5_cc_mutex_finish_init(k5_cc_mutex * m)481 k5_cc_mutex_finish_init(k5_cc_mutex *m)
482 {
483 krb5_error_code ret = 0;
484
485 ret = k5_mutex_finish_init(&m->lock);
486 if (ret) return ret;
487 m->owner = NULL;
488 m->refcount = 0;
489
490 return ret;
491 }
492
/* Assert that m is held (and, under DEBUG_THREADS, held by context). */
void
k5_cc_mutex_assert_locked(krb5_context context, k5_cc_mutex *m)
{
#ifdef DEBUG_THREADS
    assert(m->refcount > 0);
    assert(m->owner == context);
#endif
    k5_assert_locked(&m->lock);
}
502
/* Assert that m is not held by anyone. */
void
k5_cc_mutex_assert_unlocked(krb5_context context, k5_cc_mutex *m)
{
#ifdef DEBUG_THREADS
    assert(m->refcount == 0);
    assert(m->owner == NULL);
#endif
    k5_assert_unlocked(&m->lock);
}
512
513 void
k5_cc_mutex_lock(krb5_context context,k5_cc_mutex * m)514 k5_cc_mutex_lock(krb5_context context, k5_cc_mutex *m)
515 {
516 /* not locked or already locked by another context */
517 if (m->owner != context) {
518 /* acquire lock, blocking until available */
519 k5_mutex_lock(&m->lock);
520 m->owner = context;
521 m->refcount = 1;
522 }
523 /* already locked by this context, just increase refcount */
524 else {
525 m->refcount++;
526 }
527 }
528
529 void
k5_cc_mutex_unlock(krb5_context context,k5_cc_mutex * m)530 k5_cc_mutex_unlock(krb5_context context, k5_cc_mutex *m)
531 {
532 /* verify owner and sanity check refcount */
533 if ((m->owner != context) || (m->refcount < 1)) {
534 return;
535 }
536 /* decrement & unlock when count reaches zero */
537 m->refcount--;
538 if (m->refcount == 0) {
539 m->owner = NULL;
540 k5_mutex_unlock(&m->lock);
541 }
542 }
543
544 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
545 void
k5_cc_mutex_force_unlock(k5_cc_mutex * m)546 k5_cc_mutex_force_unlock(k5_cc_mutex *m)
547 {
548 m->refcount = 0;
549 m->owner = NULL;
550 if (m->refcount > 0) {
551 k5_mutex_unlock(&m->lock);
552 }
553 }
554
555 /*
556 * holds on to all pertype global locks as well as typelist lock
557 */
558
/*
 * Acquire the collection lock plus every per-type global lock.  The typelist
 * lock is held only while the per-type locks are being taken, so type
 * registration is blocked during acquisition but not for as long as the
 * collection stays locked.  On failure nothing is left held by the caller.
 */
krb5_error_code
k5_cccol_lock(krb5_context context)
{
    krb5_error_code ret = 0;

    k5_cc_mutex_lock(context, &cccol_lock);
    k5_mutex_lock(&cc_typelist_lock);
    k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
#ifdef USE_KEYRING_CCACHE
    k5_cc_mutex_lock(context, &krb5int_krcc_mutex);
#endif
#ifdef USE_CCAPI
    ret = krb5_stdccv3_context_lock(context);
    if (ret) {
        /* Back out the locks acquired above, in reverse order.
         * NOTE(review): the keyring mutex is not released here when
         * USE_KEYRING_CCACHE is also defined -- presumably the two options
         * are never enabled together; confirm. */
        k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
        k5_mutex_unlock(&cc_typelist_lock);
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }
#endif
    k5_mutex_unlock(&cc_typelist_lock);
    return ret;
}
584
/*
 * Release the collection lock and every per-type global lock acquired by
 * k5_cccol_lock(), verifying along the way that each one is actually held
 * by this context.
 */
krb5_error_code
k5_cccol_unlock(krb5_context context)
{
    krb5_error_code ret = 0;

    /* sanity check */
    k5_cc_mutex_assert_locked(context, &cccol_lock);

    k5_mutex_lock(&cc_typelist_lock);

    /* unlock each type in the opposite order */
#ifdef USE_CCAPI
    krb5_stdccv3_context_unlock(context);
#endif
#ifdef USE_KEYRING_CCACHE
    k5_cc_mutex_assert_locked(context, &krb5int_krcc_mutex);
    k5_cc_mutex_unlock(context, &krb5int_krcc_mutex);
#endif
    k5_cc_mutex_assert_locked(context, &krb5int_mcc_mutex);
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    k5_cc_mutex_assert_locked(context, &krb5int_cc_file_mutex);
    k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
    k5_mutex_assert_locked(&cc_typelist_lock);

    k5_mutex_unlock(&cc_typelist_lock);
    k5_cc_mutex_unlock(context, &cccol_lock);

    return ret;
}
614
615 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
616 void
k5_cccol_force_unlock()617 k5_cccol_force_unlock()
618 {
619 /* sanity check */
620 if ((&cccol_lock)->refcount == 0) {
621 return;
622 }
623
624 k5_mutex_lock(&cc_typelist_lock);
625
626 /* unlock each type in the opposite order */
627 #ifdef USE_KEYRING_CCACHE
628 k5_cc_mutex_force_unlock(&krb5int_krcc_mutex);
629 #endif
630 #ifdef USE_CCAPI
631 krb5_stdccv3_context_unlock(NULL);
632 #endif
633 k5_cc_mutex_force_unlock(&krb5int_mcc_mutex);
634 k5_cc_mutex_force_unlock(&krb5int_cc_file_mutex);
635
636 k5_mutex_unlock(&cc_typelist_lock);
637 k5_cc_mutex_force_unlock(&cccol_lock);
638 }
639