1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2021 Tintri by DDN, Inc. All rights reserved.
24 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2012 Milan Jurik. All rights reserved.
26 * Copyright 2012 Marcel Telka <marcel@telka.sk>
27 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
28 */
29
30 /*
31 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
32 *
33 * $Id: svc_auth_gssapi.c,v 1.19 1994/10/27 12:38:51 jik Exp $
34 */
35
36 /*
37 * Server side handling of RPCSEC_GSS flavor.
38 */
39
40 #include <sys/systm.h>
41 #include <sys/kstat.h>
42 #include <sys/cmn_err.h>
43 #include <sys/debug.h>
44 #include <sys/types.h>
45 #include <sys/time.h>
46 #include <gssapi/gssapi.h>
47 #include <gssapi/gssapi_ext.h>
48 #include <rpc/rpc.h>
49 #include <rpc/rpcsec_defs.h>
50 #include <sys/sunddi.h>
51 #include <sys/atomic.h>
52 #include <sys/disp.h>
53
/* Builds an rpc_gss_principal_t from an exported (flat) GSS name. */
extern bool_t __rpc_gss_make_principal(rpc_gss_principal_t *, gss_buffer_t);

#ifdef DEBUG
extern void prom_printf(const char *, ...);
#endif

#ifdef _KERNEL
/* The kernel provides bcmp(), not memcmp(); map one onto the other. */
#define	memcmp(a, b, l) bcmp((a), (b), (l))
#endif
63
64
65 /*
66 * Sequence window definitions.
67 */
68 #define SEQ_ARR_SIZE 4
69 #define SEQ_WIN (SEQ_ARR_SIZE*32)
70 #define SEQ_HI_BIT 0x80000000
71 #define SEQ_LO_BIT 1
72 #define DIV_BY_32 5
73 #define SEQ_MASK 0x1f
74 #define SEQ_MAX ((unsigned int)0x80000000)
75
76
77 /* cache retransmit data */
78 typedef struct _retrans_entry {
79 uint32_t xid;
80 rpc_gss_init_res result;
81 } retrans_entry;
82
83 /*
84 * Server side RPCSEC_GSS context information.
85 */
/*
 * Server side RPCSEC_GSS context information.
 *
 * Entries live in the `clients' hash table (next/prev) and on a global
 * LRU list (lru_next/lru_prev); both are protected by ctx_mutex, while
 * per-entry fields are protected by clm.
 */
typedef struct _svc_rpc_gss_data {
	struct _svc_rpc_gss_data *next, *prev;		/* hash chain */
	struct _svc_rpc_gss_data *lru_next, *lru_prev;	/* LRU list */
	bool_t established;		/* TRUE once ctx fully negotiated */
	gss_ctx_id_t context;		/* GSS-API security context */
	gss_buffer_desc client_name;	/* exported (flat) client name */
	time_t expiration;		/* ctx expiry, or GSS_C_INDEFINITE */
	uint_t seq_num;			/* highest sequence number seen */
	uint_t seq_bits[SEQ_ARR_SIZE];	/* replay-detection window */
	uint_t key;			/* hash key / wire ctx_handle */
	OM_uint32 qop;			/* quality of protection in use */
	bool_t done_docallback;		/* server callback already run */
	bool_t locked;			/* cred locked by server callback */
	rpc_gss_rawcred_t raw_cred;	/* raw credentials */
	rpc_gss_ucred_t u_cred;		/* cached unix credentials */
	time_t u_cred_set;		/* unix cred cache expiry (0=unset) */
	void *cookie;			/* server callback cookie */
	gss_cred_id_t deleg;		/* delegated credentials */
	kmutex_t clm;			/* protects this entry */
	int ref_cnt;			/* in-flight request references */
	time_t last_ref_time;		/* last use, for sweeping */
	bool_t stale;			/* destroy when ref_cnt drops to 0 */
	retrans_entry *retrans_data;	/* cached INIT reply (see above) */
} svc_rpc_gss_data;
110
111 /*
112 * Data structures used for LRU based context management.
113 */
114
115
116 #define HASH(key) ((key) % svc_rpc_gss_hashmod)
117 /* Size of hash table for svc_rpc_gss_data structures */
118 #define GSS_DATA_HASH_SIZE 1024
119
120 /*
121 * The following two defines specify a time delta that is used in
122 * sweep_clients. When the last_ref_time of a context is older than
123 * than the current time minus the delta, i.e, the context has not
124 * been referenced in the last delta seconds, we will return the
125 * context back to the cache if the ref_cnt is zero. The first delta
126 * value will be used when sweep_clients is called from
127 * svc_data_reclaim, the kmem_cache reclaim call back. We will reclaim
128 * all entries except those that are currently "active". By active we
129 * mean those that have been referenced in the last ACTIVE_DELTA
130 * seconds. If sweep_client is not being called from reclaim, then we
131 * will reclaim all entries that are "inactive". By inactive we mean
132 * those entries that have not been accessed in INACTIVE_DELTA
133 * seconds. Note we always assume that ACTIVE_DELTA is less than
134 * INACTIVE_DELTA, so that reaping entries from a reclaim operation
135 * will necessarily imply reaping all "inactive" entries and then
136 * some.
137 */
138
139 /*
140 * If low on memory reap cache entries that have not been active for
141 * ACTIVE_DELTA seconds and have a ref_cnt equal to zero.
142 */
143 #define ACTIVE_DELTA 30*60 /* 30 minutes */
144
145 /*
146 * If in sweeping contexts we find contexts with a ref_cnt equal to zero
147 * and the context has not been referenced in INACTIVE_DELTA seconds, return
148 * the entry to the cache.
149 */
150 #define INACTIVE_DELTA 8*60*60 /* 8 hours */
151
152 int svc_rpc_gss_hashmod = GSS_DATA_HASH_SIZE;
153 static svc_rpc_gss_data **clients;
154 static svc_rpc_gss_data *lru_first, *lru_last;
155 static time_t sweep_interval = 60*60;
156 static time_t last_swept = 0;
157 static int num_gss_contexts = 0;
158 static time_t svc_rpcgss_gid_timeout = 60*60*12;
159 static kmem_cache_t *svc_data_handle;
160 static time_t svc_rpc_gss_active_delta = ACTIVE_DELTA;
161 static time_t svc_rpc_gss_inactive_delta = INACTIVE_DELTA;
162
163 /*
164 * lock used with context/lru variables
165 */
166 static kmutex_t ctx_mutex;
167
168 /*
169 * Data structure to contain cache statistics
170 */
171
172 static struct {
173 int64_t total_entries_allocated;
174 int64_t no_reclaims;
175 int64_t no_returned_by_reclaim;
176 } svc_rpc_gss_cache_stats;
177
178
179 /*
180 * lock used with server credential variables list
181 *
182 * server cred list locking guidelines:
183 * - Writer's lock holder has exclusive access to the list
184 */
185 static krwlock_t cred_lock;
186
187 /*
188 * server callback list
189 */
190 typedef struct rpc_gss_cblist_s {
191 struct rpc_gss_cblist_s *next;
192 rpc_gss_callback_t cb;
193 } rpc_gss_cblist_t;
194
195 static rpc_gss_cblist_t *rpc_gss_cblist = NULL;
196
197 /*
198 * lock used with callback variables
199 */
200 static kmutex_t cb_mutex;
201
202 /*
203 * forward declarations
204 */
205 static bool_t svc_rpc_gss_wrap();
206 static bool_t svc_rpc_gss_unwrap();
207 static svc_rpc_gss_data *create_client();
208 static svc_rpc_gss_data *get_client();
209 static svc_rpc_gss_data *find_client();
210 static void destroy_client();
211 static void sweep_clients(bool_t);
212 static void insert_client();
213 static bool_t check_verf(struct rpc_msg *, gss_ctx_id_t,
214 int *, uid_t);
215 static bool_t set_response_verf();
216 static void retrans_add(svc_rpc_gss_data *, uint32_t,
217 rpc_gss_init_res *);
218 static void retrans_del(svc_rpc_gss_data *);
219 static bool_t transfer_sec_context(svc_rpc_gss_data *);
220 static void common_client_data_free(svc_rpc_gss_data *);
221
222 /*
223 * server side wrap/unwrap routines
224 */
225 struct svc_auth_ops svc_rpc_gss_ops = {
226 svc_rpc_gss_wrap,
227 svc_rpc_gss_unwrap,
228 };
229
/* taskq(9F): argument handed to the RPCSEC_GSS_INIT worker task */
typedef struct svcrpcsec_gss_taskq_arg {
	SVCXPRT *rq_xprt;		/* clone xprt owned by the task */
	rpc_gss_init_arg *rpc_call_arg;	/* deserialized INIT arguments */
	struct rpc_msg *msg;		/* dup of the request message */
	svc_rpc_gss_data *client_data;	/* referenced context entry */
	uint_t cr_version;		/* RPCSEC_GSS credential version */
	rpc_gss_service_t cr_service;	/* requested security service */
} svcrpcsec_gss_taskq_arg_t;

/* gssd is single threaded, so 1 thread for the taskq is probably good/ok */
int rpcsec_gss_init_taskq_nthreads = 1;

extern struct rpc_msg *rpc_msg_dup(struct rpc_msg *);
extern void rpc_msg_free(struct rpc_msg **, int);

/*
 * from svc_clts.c:
 * Transport private data.
 * Kept in xprt->xp_p2buf.
 */
struct udp_data {
	mblk_t	*ud_resp;			/* buffer for response */
	mblk_t	*ud_inmp;			/* mblk chain of request */
};

/* zone key and thread-specific-data key for per-zone taskq state */
static zone_key_t svc_gss_zone_key;
static uint_t svc_gss_tsd_key;

/* per-zone state: the taskq that runs RPCSEC_GSS_INIT requests */
typedef struct svc_gss_zsd {
	zoneid_t sgz_zoneid;		/* zone this state belongs to */
	kmutex_t sgz_lock;		/* protects sgz_init_taskq */
	taskq_t *sgz_init_taskq;	/* NULL if creation failed */
} svc_gss_zsd_t;
264
265 static taskq_t *
svc_gss_create_taskq(zone_t * zone)266 svc_gss_create_taskq(zone_t *zone)
267 {
268 taskq_t *tq;
269
270 if (zone == NULL) {
271 cmn_err(CE_NOTE, "%s: couldn't find zone", __func__);
272 return (NULL);
273 }
274
275 /* Like ddi_taskq_create(), but for zones, just for now */
276 tq = taskq_create_proc("rpcsec_gss_init_taskq",
277 rpcsec_gss_init_taskq_nthreads, minclsyspri,
278 rpcsec_gss_init_taskq_nthreads, INT_MAX, zone->zone_zsched,
279 TASKQ_PREPOPULATE);
280
281 if (tq == NULL)
282 cmn_err(CE_NOTE, "%s: taskq_create_proc failed", __func__);
283
284 return (tq);
285 }
286
287 static void *
svc_gss_zone_init(zoneid_t zoneid)288 svc_gss_zone_init(zoneid_t zoneid)
289 {
290 svc_gss_zsd_t *zsd;
291 zone_t *zone = curzone;
292
293 zsd = kmem_alloc(sizeof (*zsd), KM_SLEEP);
294 mutex_init(&zsd->sgz_lock, NULL, MUTEX_DEFAULT, NULL);
295 zsd->sgz_zoneid = zoneid;
296
297 if (zone->zone_id != zoneid)
298 zone = zone_find_by_id_nolock(zoneid);
299
300 zsd->sgz_init_taskq = svc_gss_create_taskq(zone);
301 return (zsd);
302 }
303
304 /*
305 * taskq_destroy() wakes all taskq threads and tells them to exit.
306 * It then cv_wait()'s for all of them to finish exiting.
307 * cv_wait() calls resume(), which accesses the target's process.
308 * That may be one of our taskq threads, which are attached to zone_zsched.
309 *
310 * If we do taskq_destroy() in the zsd_destroy callback, then zone_zsched
311 * will have exited and been destroyed before it runs, and we can panic
312 * in resume(). Our taskq threads are not accounted for in either
313 * zone_ntasks or zone_kthreads, which means zsched does not wait for
314 * taskq threads attached to it to complete before exiting.
315 *
316 * We therefore need to do this at shutdown time. At the point where
317 * the zsd_shutdown callback is invoked, all other zone tasks (processes)
318 * have exited, but zone_kthreads and other taskqs hanging off zsched have not.
319 *
320 * We need to be careful not to allow RPC services to be ran from
321 * zsched-attached taskqs or zone_kthreads.
322 */
323 static void
svc_gss_zone_shutdown(zoneid_t zoneid,void * arg)324 svc_gss_zone_shutdown(zoneid_t zoneid, void *arg)
325 {
326 svc_gss_zsd_t *zsd = arg;
327
328 /* All non-zsched-hung threads should be finished. */
329 mutex_enter(&zsd->sgz_lock);
330 if (zsd->sgz_init_taskq != NULL) {
331 taskq_destroy(zsd->sgz_init_taskq);
332 zsd->sgz_init_taskq = NULL;
333 }
334 mutex_exit(&zsd->sgz_lock);
335 }
336
337 static void
svc_gss_zone_fini(zoneid_t zoneid,void * arg)338 svc_gss_zone_fini(zoneid_t zoneid, void *arg)
339 {
340 svc_gss_zsd_t *zsd = arg;
341
342 mutex_destroy(&zsd->sgz_lock);
343 kmem_free(zsd, sizeof (*zsd));
344 }
345
346 static svc_gss_zsd_t *
svc_gss_get_zsd(void)347 svc_gss_get_zsd(void)
348 {
349 svc_gss_zsd_t *zsd;
350
351 zsd = tsd_get(svc_gss_tsd_key);
352 if (zsd == NULL) {
353 zsd = zone_getspecific(svc_gss_zone_key, curzone);
354 (void) tsd_set(svc_gss_tsd_key, zsd);
355 }
356
357 return (zsd);
358 }
359
360 /*ARGSUSED*/
361 static int
svc_gss_data_create(void * buf,void * pdata,int kmflag)362 svc_gss_data_create(void *buf, void *pdata, int kmflag)
363 {
364 svc_rpc_gss_data *client_data = (svc_rpc_gss_data *)buf;
365
366 mutex_init(&client_data->clm, NULL, MUTEX_DEFAULT, NULL);
367
368 return (0);
369 }
370
371 /*ARGSUSED*/
372 static void
svc_gss_data_destroy(void * buf,void * pdata)373 svc_gss_data_destroy(void *buf, void *pdata)
374 {
375 svc_rpc_gss_data *client_data = (svc_rpc_gss_data *)buf;
376
377 mutex_destroy(&client_data->clm);
378 }
379
380
381 /*ARGSUSED*/
382 static void
svc_gss_data_reclaim(void * pdata)383 svc_gss_data_reclaim(void *pdata)
384 {
385 mutex_enter(&ctx_mutex);
386
387 svc_rpc_gss_cache_stats.no_reclaims++;
388 sweep_clients(TRUE);
389
390 mutex_exit(&ctx_mutex);
391 }
392
393 /*
394 * Init stuff on the server side.
395 */
396 void
svc_gss_init()397 svc_gss_init()
398 {
399 mutex_init(&cb_mutex, NULL, MUTEX_DEFAULT, NULL);
400 mutex_init(&ctx_mutex, NULL, MUTEX_DEFAULT, NULL);
401 rw_init(&cred_lock, NULL, RW_DEFAULT, NULL);
402 clients = (svc_rpc_gss_data **)
403 kmem_zalloc(svc_rpc_gss_hashmod * sizeof (svc_rpc_gss_data *),
404 KM_SLEEP);
405 svc_data_handle = kmem_cache_create("rpc_gss_data_cache",
406 sizeof (svc_rpc_gss_data), 0,
407 svc_gss_data_create,
408 svc_gss_data_destroy,
409 svc_gss_data_reclaim,
410 NULL, NULL, 0);
411
412 tsd_create(&svc_gss_tsd_key, NULL);
413 zone_key_create(&svc_gss_zone_key, svc_gss_zone_init,
414 svc_gss_zone_shutdown, svc_gss_zone_fini);
415 }
416
417 /*
418 * Destroy structures allocated in svc_gss_init().
419 * This routine is called by _init() if mod_install() failed.
420 */
421 void
svc_gss_fini()422 svc_gss_fini()
423 {
424 if (zone_key_delete(svc_gss_zone_key) != 0)
425 cmn_err(CE_WARN, "%s: failed to delete zone key", __func__);
426 tsd_destroy(&svc_gss_tsd_key);
427 mutex_destroy(&cb_mutex);
428 mutex_destroy(&ctx_mutex);
429 rw_destroy(&cred_lock);
430 kmem_free(clients, svc_rpc_gss_hashmod * sizeof (svc_rpc_gss_data *));
431 kmem_cache_destroy(svc_data_handle);
432 }
433
434 /*
435 * Cleanup routine for destroying context, called after service
436 * procedure is executed. Actually we just decrement the reference count
437 * associated with this context. If the reference count is zero and the
438 * context is marked as stale, we would then destroy the context. Additionally,
439 * we check if its been longer than sweep_interval since the last sweep_clients
440 * was run, and if so run sweep_clients to free all stale contexts with zero
441 * reference counts or contexts that are old. (Haven't been access in
442 * svc_rpc_inactive_delta seconds).
443 */
void
rpc_gss_cleanup(SVCXPRT *clone_xprt)
{
	svc_rpc_gss_data *cl;
	SVCAUTH *svcauth;

	/*
	 * First check if the current context needs to be cleaned up.
	 * Other threads may have marked this client data stale in
	 * the meantime.
	 */
	svcauth = &clone_xprt->xp_auth;
	mutex_enter(&ctx_mutex);
	if ((cl = (svc_rpc_gss_data *)svcauth->svc_ah_private) != NULL) {
		mutex_enter(&cl->clm);
		ASSERT(cl->ref_cnt > 0);
		/* Drop our reference; destroy on last ref of a stale ctx. */
		if (--cl->ref_cnt == 0 && cl->stale) {
			/* clm must be released before destroy_client() */
			mutex_exit(&cl->clm);
			destroy_client(cl);
			svcauth->svc_ah_private = NULL;
		} else
			mutex_exit(&cl->clm);
	}

	/*
	 * Check for other expired contexts (at most once per
	 * sweep_interval seconds).
	 */
	if ((gethrestime_sec() - last_swept) > sweep_interval)
		sweep_clients(FALSE);

	mutex_exit(&ctx_mutex);
}
476
477 /*
478 * Shift the array arr of length arrlen right by nbits bits.
479 */
480 static void
shift_bits(uint_t * arr,int arrlen,int nbits)481 shift_bits(uint_t *arr, int arrlen, int nbits)
482 {
483 int i, j;
484 uint_t lo, hi;
485
486 /*
487 * If the number of bits to be shifted exceeds SEQ_WIN, just
488 * zero out the array.
489 */
490 if (nbits < SEQ_WIN) {
491 for (i = 0; i < nbits; i++) {
492 hi = 0;
493 for (j = 0; j < arrlen; j++) {
494 lo = arr[j] & SEQ_LO_BIT;
495 arr[j] >>= 1;
496 if (hi)
497 arr[j] |= SEQ_HI_BIT;
498 hi = lo;
499 }
500 }
501 } else {
502 for (j = 0; j < arrlen; j++)
503 arr[j] = 0;
504 }
505 }
506
507 /*
508 * Check that the received sequence number seq_num is valid.
509 */
510 static bool_t
check_seq(svc_rpc_gss_data * cl,uint_t seq_num,bool_t * kill_context)511 check_seq(svc_rpc_gss_data *cl, uint_t seq_num, bool_t *kill_context)
512 {
513 int i, j;
514 uint_t bit;
515
516 /*
517 * If it exceeds the maximum, kill context.
518 */
519 if (seq_num >= SEQ_MAX) {
520 *kill_context = TRUE;
521 RPCGSS_LOG0(4, "check_seq: seq_num not valid\n");
522 return (FALSE);
523 }
524
525 /*
526 * If greater than the last seen sequence number, just shift
527 * the sequence window so that it starts at the new sequence
528 * number and extends downwards by SEQ_WIN.
529 */
530 if (seq_num > cl->seq_num) {
531 (void) shift_bits(cl->seq_bits, SEQ_ARR_SIZE,
532 (int)(seq_num - cl->seq_num));
533 cl->seq_bits[0] |= SEQ_HI_BIT;
534 cl->seq_num = seq_num;
535 return (TRUE);
536 }
537
538 /*
539 * If it is outside the sequence window, return failure.
540 */
541 i = cl->seq_num - seq_num;
542 if (i >= SEQ_WIN) {
543 RPCGSS_LOG0(4, "check_seq: seq_num is outside the window\n");
544 return (FALSE);
545 }
546
547 /*
548 * If within sequence window, set the bit corresponding to it
549 * if not already seen; if already seen, return failure.
550 */
551 j = SEQ_MASK - (i & SEQ_MASK);
552 bit = j > 0 ? (1 << j) : 1;
553 i >>= DIV_BY_32;
554 if (cl->seq_bits[i] & bit) {
555 RPCGSS_LOG0(4, "check_seq: sequence number already seen\n");
556 return (FALSE);
557 }
558 cl->seq_bits[i] |= bit;
559 return (TRUE);
560 }
561
562 /*
563 * Set server callback.
564 */
565 bool_t
rpc_gss_set_callback(rpc_gss_callback_t * cb)566 rpc_gss_set_callback(rpc_gss_callback_t *cb)
567 {
568 rpc_gss_cblist_t *cbl, *tmp;
569
570 if (cb->callback == NULL) {
571 RPCGSS_LOG0(1, "rpc_gss_set_callback: no callback to set\n");
572 return (FALSE);
573 }
574
575 /* check if there is already an entry in the rpc_gss_cblist. */
576 mutex_enter(&cb_mutex);
577 if (rpc_gss_cblist) {
578 for (tmp = rpc_gss_cblist; tmp != NULL; tmp = tmp->next) {
579 if ((tmp->cb.callback == cb->callback) &&
580 (tmp->cb.version == cb->version) &&
581 (tmp->cb.program == cb->program)) {
582 mutex_exit(&cb_mutex);
583 return (TRUE);
584 }
585 }
586 }
587
588 /* Not in rpc_gss_cblist. Create a new entry. */
589 if ((cbl = (rpc_gss_cblist_t *)kmem_alloc(sizeof (*cbl), KM_SLEEP))
590 == NULL) {
591 mutex_exit(&cb_mutex);
592 return (FALSE);
593 }
594 cbl->cb = *cb;
595 cbl->next = rpc_gss_cblist;
596 rpc_gss_cblist = cbl;
597 mutex_exit(&cb_mutex);
598 return (TRUE);
599 }
600
601 /*
602 * Locate callback (if specified) and call server. Release any
603 * delegated credentials unless passed to server and the server
604 * accepts the context. If a callback is not specified, accept
605 * the incoming context.
606 */
static bool_t
do_callback(struct svc_req *req, svc_rpc_gss_data *client_data)
{
	rpc_gss_cblist_t *cbl;
	bool_t ret = TRUE, found = FALSE;
	rpc_gss_lock_t lock;
	OM_uint32 minor;
	mutex_enter(&cb_mutex);
	/* Find the callback registered for this program/version, if any. */
	for (cbl = rpc_gss_cblist; cbl != NULL; cbl = cbl->next) {
		if (req->rq_prog != cbl->cb.program ||
		    req->rq_vers != cbl->cb.version)
			continue;
		found = TRUE;
		lock.locked = FALSE;
		lock.raw_cred = &client_data->raw_cred;
		ret = (*cbl->cb.callback)(req, client_data->deleg,
		    client_data->context, &lock, &client_data->cookie);
		req->rq_xprt->xp_cookie = client_data->cookie;

		if (ret) {
			/*
			 * Server accepted the context; it now owns the
			 * delegated credentials.
			 */
			client_data->locked = lock.locked;
			client_data->deleg = GSS_C_NO_CREDENTIAL;
		}
		break;
	}
	if (!found) {
		/* No callback registered: release any delegated creds. */
		if (client_data->deleg != GSS_C_NO_CREDENTIAL) {
			(void) kgss_release_cred(&minor, &client_data->deleg,
			    crgetuid(CRED()));
			client_data->deleg = GSS_C_NO_CREDENTIAL;
		}
	}
	mutex_exit(&cb_mutex);
	return (ret);
}
642
643 /*
644 * Get caller credentials.
645 */
bool_t
rpc_gss_getcred(struct svc_req *req, rpc_gss_rawcred_t **rcred,
    rpc_gss_ucred_t **ucred, void **cookie)
{
	SVCAUTH *svcauth;
	svc_rpc_gss_data *client_data;
	int gssstat, gidlen;

	svcauth = &req->rq_xprt->xp_auth;
	client_data = (svc_rpc_gss_data *)svcauth->svc_ah_private;

	mutex_enter(&client_data->clm);

	if (rcred != NULL) {
		/* Hand back a per-xprt copy of the raw credentials. */
		svcauth->raw_cred = client_data->raw_cred;
		*rcred = &svcauth->raw_cred;
	}
	if (ucred != NULL) {
		*ucred = &client_data->u_cred;

		/*
		 * (Re)build the cached unix credentials if they were
		 * never set (u_cred_set == 0) or have timed out.
		 * On failure *ucred is returned as NULL.
		 */
		if (client_data->u_cred_set == 0 ||
		    client_data->u_cred_set < gethrestime_sec()) {
			if (client_data->u_cred_set == 0) {
				/* First lookup: map the GSS name to uid/gids */
				if ((gssstat = kgsscred_expname_to_unix_cred(
				    &client_data->client_name,
				    &client_data->u_cred.uid,
				    &client_data->u_cred.gid,
				    &client_data->u_cred.gidlist,
				    &gidlen, crgetuid(CRED())))
				    != GSS_S_COMPLETE) {
					RPCGSS_LOG(1, "rpc_gss_getcred: "
					    "kgsscred_expname_to_unix_cred "
					    "failed %x\n", gssstat);
					*ucred = NULL;
				} else {
					client_data->u_cred.gidlen =
					    (short)gidlen;
					client_data->u_cred_set =
					    gethrestime_sec() +
					    svc_rpcgss_gid_timeout;
				}
			} else if (client_data->u_cred_set
			    < gethrestime_sec()) {
				/* Cache expired: refresh the group list only */
				if ((gssstat = kgss_get_group_info(
				    client_data->u_cred.uid,
				    &client_data->u_cred.gid,
				    &client_data->u_cred.gidlist,
				    &gidlen, crgetuid(CRED())))
				    != GSS_S_COMPLETE) {
					RPCGSS_LOG(1, "rpc_gss_getcred: "
					    "kgss_get_group_info failed %x\n",
					    gssstat);
					*ucred = NULL;
				} else {
					client_data->u_cred.gidlen =
					    (short)gidlen;
					client_data->u_cred_set =
					    gethrestime_sec() +
					    svc_rpcgss_gid_timeout;
				}
			}
		}
	}

	if (cookie != NULL)
		*cookie = client_data->cookie;
	req->rq_xprt->xp_cookie = client_data->cookie;

	mutex_exit(&client_data->clm);

	return (TRUE);
}
718
719 /*
720 * Transfer the context data from the user land to the kernel.
721 */
transfer_sec_context(svc_rpc_gss_data * client_data)722 bool_t transfer_sec_context(svc_rpc_gss_data *client_data) {
723
724 gss_buffer_desc process_token;
725 OM_uint32 gssstat, minor;
726
727 /*
728 * Call kgss_export_sec_context
729 * if an error is returned log a message
730 * go to error handling
731 * Otherwise call kgss_import_sec_context to
732 * convert the token into a context
733 */
734 gssstat = kgss_export_sec_context(&minor, client_data->context,
735 &process_token);
736 /*
737 * if export_sec_context returns an error we delete the
738 * context just to be safe.
739 */
740 if (gssstat == GSS_S_NAME_NOT_MN) {
741 RPCGSS_LOG0(4, "svc_rpcsec_gss: export sec context "
742 "Kernel mod unavailable\n");
743
744 } else if (gssstat != GSS_S_COMPLETE) {
745 RPCGSS_LOG(1, "svc_rpcsec_gss: export sec context failed "
746 " gssstat = 0x%x\n", gssstat);
747 (void) gss_release_buffer(&minor, &process_token);
748 (void) kgss_delete_sec_context(&minor, &client_data->context,
749 NULL);
750 return (FALSE);
751
752 } else if (process_token.length == 0) {
753 RPCGSS_LOG0(1, "svc_rpcsec_gss:zero length token in response "
754 "for export_sec_context, but "
755 "gsstat == GSS_S_COMPLETE\n");
756 (void) kgss_delete_sec_context(&minor, &client_data->context,
757 NULL);
758 return (FALSE);
759
760 } else {
761 gssstat = kgss_import_sec_context(&minor, &process_token,
762 client_data->context);
763 if (gssstat != GSS_S_COMPLETE) {
764 RPCGSS_LOG(1, "svc_rpcsec_gss: import sec context "
765 " failed gssstat = 0x%x\n", gssstat);
766 (void) kgss_delete_sec_context(&minor,
767 &client_data->context, NULL);
768 (void) gss_release_buffer(&minor, &process_token);
769 return (FALSE);
770 }
771
772 RPCGSS_LOG0(4, "gss_import_sec_context successful\n");
773 (void) gss_release_buffer(&minor, &process_token);
774 }
775
776 return (TRUE);
777 }
778
779 /*
780 * do_gss_accept is called from a taskq and does all the work for a
781 * RPCSEC_GSS_INIT call (mostly calling kgss_accept_sec_context()).
782 */
/*
 * do_gss_accept is called from a taskq and does all the work for a
 * RPCSEC_GSS_INIT call (mostly calling kgss_accept_sec_context()).
 *
 * Runs with an xprt clone dedicated to this request.  client_data
 * carries a reference taken by the dispatcher; it is dropped here on
 * all paths.  Returns AUTH_OK when a reply (success or GSS failure)
 * was sent to the client, or an RPCSEC_GSS_* error otherwise.
 */
static enum auth_stat
do_gss_accept(
	SVCXPRT *xprt,
	rpc_gss_init_arg *call_arg,
	struct rpc_msg *msg,
	svc_rpc_gss_data *client_data,
	uint_t cr_version,
	rpc_gss_service_t cr_service)
{
	rpc_gss_init_res call_res;
	gss_buffer_desc output_token;
	OM_uint32 gssstat, minor, minor_stat, time_rec;
	int ret_flags, ret;
	gss_OID mech_type = GSS_C_NULL_OID;
	int free_mech_type = 1;
	struct svc_req r, *rqst;

	/* Build a minimal svc_req around the clone xprt for replies. */
	rqst = &r;
	rqst->rq_xprt = xprt;

	/*
	 * Initialize output_token.
	 */
	output_token.length = 0;
	output_token.value = NULL;

	bzero((char *)&call_res, sizeof (call_res));

	mutex_enter(&client_data->clm);
	if (client_data->stale) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: client data stale\n");
		goto error2;
	}

	/*
	 * Any response we send will use ctx_handle, so set it now;
	 * also set seq_window since this won't change.
	 */
	call_res.ctx_handle.length = sizeof (client_data->key);
	call_res.ctx_handle.value = (char *)&client_data->key;
	call_res.seq_window = SEQ_WIN;

	gssstat = GSS_S_FAILURE;
	minor = 0;
	minor_stat = 0;
	rw_enter(&cred_lock, RW_READER);

	/* Release any client name left over from a prior init pass. */
	if (client_data->client_name.length) {
		(void) gss_release_buffer(&minor,
		    &client_data->client_name);
	}
	gssstat = kgss_accept_sec_context(&minor_stat,
	    &client_data->context,
	    GSS_C_NO_CREDENTIAL,
	    call_arg,
	    GSS_C_NO_CHANNEL_BINDINGS,
	    &client_data->client_name,
	    &mech_type,
	    &output_token,
	    &ret_flags,
	    &time_rec,
	    NULL,	/* don't need a delegated cred back */
	    crgetuid(CRED()));

	RPCGSS_LOG(4, "gssstat 0x%x \n", gssstat);

	if (gssstat == GSS_S_COMPLETE) {
		/*
		 * Set the raw and unix credentials at this
		 * point. This saves a lot of computation
		 * later when credentials are retrieved.
		 */
		client_data->raw_cred.version = cr_version;
		client_data->raw_cred.service = cr_service;

		/* Replace any mechanism OID from a previous attempt. */
		if (client_data->raw_cred.mechanism) {
			kgss_free_oid(client_data->raw_cred.mechanism);
			client_data->raw_cred.mechanism = NULL;
		}
		client_data->raw_cred.mechanism = (rpc_gss_OID) mech_type;
		/*
		 * client_data is now responsible for freeing
		 * the data of 'mech_type'.
		 */
		free_mech_type = 0;

		/* Likewise replace any principal from a previous attempt. */
		if (client_data->raw_cred.client_principal) {
			kmem_free((caddr_t)client_data->\
			    raw_cred.client_principal,
			    client_data->raw_cred.\
			    client_principal->len + sizeof (int));
			client_data->raw_cred.client_principal = NULL;
		}

		/*
		 * The client_name returned from
		 * kgss_accept_sec_context() is in an
		 * exported flat format.
		 */
		if (! __rpc_gss_make_principal(
		    &client_data->raw_cred.client_principal,
		    &client_data->client_name)) {
			RPCGSS_LOG0(1, "_svcrpcsec_gss: "
			    "make principal failed\n");
			gssstat = GSS_S_FAILURE;
			(void) gss_release_buffer(&minor_stat, &output_token);
		}
	}

	rw_exit(&cred_lock);

	call_res.gss_major = gssstat;
	call_res.gss_minor = minor_stat;

	if (gssstat != GSS_S_COMPLETE &&
	    gssstat != GSS_S_CONTINUE_NEEDED) {
		/*
		 * Accept failed: send the GSS status back to the
		 * client (with an empty handle) and retire the context.
		 */
		call_res.ctx_handle.length = 0;
		call_res.ctx_handle.value = NULL;
		call_res.seq_window = 0;
		rpc_gss_display_status(gssstat, minor_stat, mech_type,
		    crgetuid(CRED()),
		    "_svc_rpcsec_gss gss_accept_sec_context");
		(void) svc_sendreply(rqst->rq_xprt,
		    __xdr_rpc_gss_init_res, (caddr_t)&call_res);
		client_data->stale = TRUE;
		ret = AUTH_OK;
		goto error2;
	}

	/*
	 * If appropriate, set established to TRUE *after* sending
	 * response (otherwise, the client will receive the final
	 * token encrypted)
	 */
	if (gssstat == GSS_S_COMPLETE) {
		/*
		 * Context is established. Set expiration time
		 * for the context.
		 */
		client_data->seq_num = 1;
		if ((time_rec == GSS_C_INDEFINITE) || (time_rec == 0)) {
			client_data->expiration = GSS_C_INDEFINITE;
		} else {
			client_data->expiration =
			    time_rec + gethrestime_sec();
		}

		/* Pull the established context from gssd into the kernel. */
		if (!transfer_sec_context(client_data)) {
			ret = RPCSEC_GSS_FAILED;
			client_data->stale = TRUE;
			RPCGSS_LOG0(1,
			    "_svc_rpcsec_gss: transfer sec context failed\n");
			goto error2;
		}

		client_data->established = TRUE;
	}

	/*
	 * This step succeeded. Send a response, along with
	 * a token if there's one. Don't dispatch.
	 */

	if (output_token.length != 0)
		GSS_COPY_BUFFER(call_res.token, output_token);

	/*
	 * If GSS_S_COMPLETE: set response verifier to
	 * checksum of SEQ_WIN
	 */
	if (gssstat == GSS_S_COMPLETE) {
		if (!set_response_verf(rqst, msg, client_data,
		    (uint_t)SEQ_WIN)) {
			ret = RPCSEC_GSS_FAILED;
			client_data->stale = TRUE;
			RPCGSS_LOG0(1,
			    "_svc_rpcsec_gss:set response verifier failed\n");
			goto error2;
		}
	}

	if (!svc_sendreply(rqst->rq_xprt, __xdr_rpc_gss_init_res,
	    (caddr_t)&call_res)) {
		ret = RPCSEC_GSS_FAILED;
		client_data->stale = TRUE;
		RPCGSS_LOG0(1, "_svc_rpcsec_gss:send reply failed\n");
		goto error2;
	}

	/*
	 * Cache last response in case it is lost and the client
	 * retries on an established context.
	 */
	(void) retrans_add(client_data, msg->rm_xid, &call_res);
	ASSERT(client_data->ref_cnt > 0);
	client_data->ref_cnt--;
	mutex_exit(&client_data->clm);

	(void) gss_release_buffer(&minor_stat, &output_token);

	return (AUTH_OK);

error2:
	/* Drop the dispatcher's reference and clean up local buffers. */
	ASSERT(client_data->ref_cnt > 0);
	client_data->ref_cnt--;
	mutex_exit(&client_data->clm);
	(void) gss_release_buffer(&minor_stat, &output_token);
	if (free_mech_type && mech_type)
		kgss_free_oid(mech_type);

	return (ret);
}
996
997 static void
svcrpcsec_gss_taskq_func(void * svcrpcsecgss_taskq_arg)998 svcrpcsec_gss_taskq_func(void *svcrpcsecgss_taskq_arg)
999 {
1000 enum auth_stat retval;
1001 svcrpcsec_gss_taskq_arg_t *arg = svcrpcsecgss_taskq_arg;
1002
1003 retval = do_gss_accept(arg->rq_xprt, arg->rpc_call_arg, arg->msg,
1004 arg->client_data, arg->cr_version, arg->cr_service);
1005 if (retval != AUTH_OK) {
1006 cmn_err(CE_NOTE,
1007 "svcrpcsec_gss_taskq_func: do_gss_accept fail 0x%x",
1008 retval);
1009 }
1010 rpc_msg_free(&arg->msg, MAX_AUTH_BYTES);
1011 SVC_RELE(arg->rq_xprt, NULL, FALSE);
1012 svc_clone_unlink(arg->rq_xprt);
1013 svc_clone_free(arg->rq_xprt);
1014 xdr_free(__xdr_rpc_gss_init_arg, (caddr_t)arg->rpc_call_arg);
1015 kmem_free(arg->rpc_call_arg, sizeof (*arg->rpc_call_arg));
1016
1017 kmem_free(arg, sizeof (*arg));
1018 }
1019
1020 static enum auth_stat
rpcsec_gss_init(struct svc_req * rqst,struct rpc_msg * msg,rpc_gss_creds creds,bool_t * no_dispatch,svc_rpc_gss_data * c_d)1021 rpcsec_gss_init(
1022 struct svc_req *rqst,
1023 struct rpc_msg *msg,
1024 rpc_gss_creds creds,
1025 bool_t *no_dispatch,
1026 svc_rpc_gss_data *c_d) /* client data, can be NULL */
1027 {
1028 svc_rpc_gss_data *client_data;
1029 int ret;
1030 svcrpcsec_gss_taskq_arg_t *arg;
1031 svc_gss_zsd_t *zsd = svc_gss_get_zsd();
1032 taskq_t *tq = zsd->sgz_init_taskq;
1033
1034 if (tq == NULL) {
1035 mutex_enter(&zsd->sgz_lock);
1036 if (zsd->sgz_init_taskq == NULL)
1037 zsd->sgz_init_taskq = svc_gss_create_taskq(curzone);
1038 tq = zsd->sgz_init_taskq;
1039 mutex_exit(&zsd->sgz_lock);
1040 if (tq == NULL) {
1041 cmn_err(CE_NOTE, "%s: no taskq available", __func__);
1042 return (RPCSEC_GSS_FAILED);
1043 }
1044 }
1045
1046 if (creds.ctx_handle.length != 0) {
1047 RPCGSS_LOG0(1, "_svcrpcsec_gss: ctx_handle not null\n");
1048 ret = AUTH_BADCRED;
1049 return (ret);
1050 }
1051
1052 client_data = c_d ? c_d : create_client();
1053 if (client_data == NULL) {
1054 RPCGSS_LOG0(1,
1055 "_svcrpcsec_gss: can't create a new cache entry\n");
1056 ret = AUTH_FAILED;
1057 return (ret);
1058 }
1059
1060 mutex_enter(&client_data->clm);
1061 if (client_data->stale) {
1062 ret = RPCSEC_GSS_NOCRED;
1063 RPCGSS_LOG0(1, "_svcrpcsec_gss: client data stale\n");
1064 goto error2;
1065 }
1066
1067 /*
1068 * kgss_accept_sec_context()/gssd(8) can be overly time
1069 * consuming so let's queue it and return asap.
1070 *
1071 * taskq func must free arg.
1072 */
1073 arg = kmem_alloc(sizeof (*arg), KM_SLEEP);
1074
1075 /* taskq func must free rpc_call_arg & deserialized arguments */
1076 arg->rpc_call_arg = kmem_zalloc(sizeof (*arg->rpc_call_arg), KM_SLEEP);
1077
1078 /* deserialize arguments */
1079 if (!SVC_GETARGS(rqst->rq_xprt, __xdr_rpc_gss_init_arg,
1080 (caddr_t)arg->rpc_call_arg)) {
1081 ret = RPCSEC_GSS_FAILED;
1082 client_data->stale = TRUE;
1083 goto error2;
1084 }
1085
1086 /* get a xprt clone for taskq thread, taskq func must free it */
1087 arg->rq_xprt = svc_clone_init();
1088 svc_clone_link(rqst->rq_xprt->xp_master, arg->rq_xprt, rqst->rq_xprt);
1089 arg->rq_xprt->xp_xid = rqst->rq_xprt->xp_xid;
1090
1091 /*
1092 * Increment the reference count on the rpcmod slot so that is not
1093 * freed before the task has finished.
1094 */
1095 SVC_HOLD(arg->rq_xprt);
1096
1097 /* set the appropriate wrap/unwrap routine for RPCSEC_GSS */
1098 arg->rq_xprt->xp_auth.svc_ah_ops = svc_rpc_gss_ops;
1099 arg->rq_xprt->xp_auth.svc_ah_private = (caddr_t)client_data;
1100
1101 /* get a dup of rpc msg for taskq thread */
1102 arg->msg = rpc_msg_dup(msg); /* taskq func must free msg dup */
1103
1104 arg->client_data = client_data;
1105 arg->cr_version = creds.version;
1106 arg->cr_service = creds.service;
1107
1108 /* should be ok to hold clm lock as taskq will have new thread(s) */
1109 if (taskq_dispatch(tq, svcrpcsec_gss_taskq_func, arg, TQ_SLEEP)
1110 == DDI_FAILURE) {
1111 cmn_err(CE_NOTE, "%s: taskq dispatch fail", __func__);
1112 ret = RPCSEC_GSS_FAILED;
1113 rpc_msg_free(&arg->msg, MAX_AUTH_BYTES);
1114 SVC_RELE(arg->rq_xprt, NULL, FALSE);
1115 svc_clone_unlink(arg->rq_xprt);
1116 svc_clone_free(arg->rq_xprt);
1117 kmem_free(arg, sizeof (*arg));
1118 goto error2;
1119 }
1120
1121 mutex_exit(&client_data->clm);
1122 *no_dispatch = TRUE;
1123 return (AUTH_OK);
1124
1125 error2:
1126 ASSERT(client_data->ref_cnt > 0);
1127 client_data->ref_cnt--;
1128 mutex_exit(&client_data->clm);
1129 cmn_err(CE_NOTE, "rpcsec_gss_init: error 0x%x", ret);
1130 return (ret);
1131 }
1132
/*
 * Process RPCSEC_GSS_CONTINUE_INIT: a follow-up token exchange for a
 * context that is still being established.
 *
 * If the context is not yet established, the request is routed back
 * through rpcsec_gss_init() (reusing this client's context entry) so
 * the next token is processed on the taskq.  If the context IS
 * established, this is treated as a retransmission of the client's
 * last continue-init request and is answered from the per-context
 * retransmit cache when the xid matches.
 *
 * Returns AUTH_OK with *no_dispatch set when the reply was (or will
 * be) sent from here.
 */
static enum auth_stat
rpcsec_gss_continue_init(
	struct svc_req *rqst,
	struct rpc_msg *msg,
	rpc_gss_creds creds,
	bool_t *no_dispatch)
{
	int ret;
	svc_rpc_gss_data *client_data;
	svc_rpc_gss_parms_t *gss_parms;
	rpc_gss_init_res *retrans_result;

	/* A continue-init must reference an existing context handle. */
	if (creds.ctx_handle.length == 0) {
		RPCGSS_LOG0(1, "_svcrpcsec_gss: no ctx_handle\n");
		ret = AUTH_BADCRED;
		return (ret);
	}
	/* get_client() takes a reference; dropped on error paths below */
	if ((client_data = get_client(&creds.ctx_handle)) == NULL) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: no security context\n");
		return (ret);
	}

	mutex_enter(&client_data->clm);
	if (client_data->stale) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: client data stale\n");
		goto error2;
	}

	/*
	 * If context not established, go thru INIT code but with
	 * this client handle.  Our get_client() reference is handed
	 * off to rpcsec_gss_init().
	 */
	if (!client_data->established) {
		mutex_exit(&client_data->clm);
		return (rpcsec_gss_init(rqst, msg, creds, no_dispatch,
		    client_data));
	}

	/*
	 * Set the appropriate wrap/unwrap routine for RPCSEC_GSS.
	 */
	rqst->rq_xprt->xp_auth.svc_ah_ops = svc_rpc_gss_ops;
	rqst->rq_xprt->xp_auth.svc_ah_private = (caddr_t)client_data;

	/*
	 * Keep copy of parameters we'll need for response, for the
	 * sake of reentrancy (we don't want to look in the context
	 * data because when we are sending a response, another
	 * request may have come in).
	 */
	gss_parms = &rqst->rq_xprt->xp_auth.svc_gss_parms;
	gss_parms->established = client_data->established;
	gss_parms->service = creds.service;
	gss_parms->qop_rcvd = (uint_t)client_data->qop;
	gss_parms->context = (void *)client_data->context;
	gss_parms->seq_num = creds.seq_num;

	/*
	 * This is an established context. Continue to
	 * satisfy retried continue init requests out of
	 * the retransmit cache. Throw away any that don't
	 * have a matching xid or the cache is empty.
	 * Delete the retransmit cache once the client sends
	 * a data request.
	 *
	 * NOTE(review): on a non-matching xid the get_client()
	 * reference is not dropped here; presumably it is released
	 * with the transport auth handle -- confirm against the
	 * caller's release path.
	 */
	if (client_data->retrans_data &&
	    (client_data->retrans_data->xid == msg->rm_xid)) {
		retrans_result = &client_data->retrans_data->result;
		if (set_response_verf(rqst, msg, client_data,
		    (uint_t)retrans_result->seq_window)) {
			/* the replayed reply carries a token, not data */
			gss_parms->established = FALSE;
			(void) svc_sendreply(rqst->rq_xprt,
			    __xdr_rpc_gss_init_res, (caddr_t)retrans_result);
			*no_dispatch = TRUE;
			ASSERT(client_data->ref_cnt > 0);
			client_data->ref_cnt--;
		}
	}
	mutex_exit(&client_data->clm);

	return (AUTH_OK);

error2:
	ASSERT(client_data->ref_cnt > 0);
	client_data->ref_cnt--;
	mutex_exit(&client_data->clm);
	return (ret);
}
1223
/*
 * Process an RPCSEC_GSS_DATA request on an established context:
 * validate the credential's service and context handle, verify the
 * header checksum and sequence number, run the user callback once per
 * context, and prime the transport's gss_parms so the reply can be
 * wrapped.  On success the request proceeds to normal dispatch with
 * rq_clntcred pointing at the context's raw credential.
 *
 * The reference taken by get_client() is dropped on every error path
 * (error2); on success it remains with the transport auth handle.
 */
static enum auth_stat
rpcsec_gss_data(
	struct svc_req *rqst,
	struct rpc_msg *msg,
	rpc_gss_creds creds,
	bool_t *no_dispatch)
{
	int ret;
	svc_rpc_gss_parms_t *gss_parms;
	svc_rpc_gss_data *client_data;

	/* Only the three defined protection services are accepted. */
	switch (creds.service) {
	case rpc_gss_svc_none:
	case rpc_gss_svc_integrity:
	case rpc_gss_svc_privacy:
		break;
	default:
		cmn_err(CE_NOTE, "__svcrpcsec_gss: unknown service type=0x%x",
		    creds.service);
		RPCGSS_LOG(1, "_svcrpcsec_gss: unknown service type: 0x%x\n",
		    creds.service);
		ret = AUTH_BADCRED;
		return (ret);
	}

	if (creds.ctx_handle.length == 0) {
		RPCGSS_LOG0(1, "_svcrpcsec_gss: no ctx_handle\n");
		ret = AUTH_BADCRED;
		return (ret);
	}
	if ((client_data = get_client(&creds.ctx_handle)) == NULL) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: no security context\n");
		return (ret);
	}


	mutex_enter(&client_data->clm);
	if (!client_data->established) {
		ret = AUTH_FAILED;
		goto error2;
	}
	if (client_data->stale) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: client data stale\n");
		goto error2;
	}

	/*
	 * Once the context is established and there is no more
	 * retransmission of last continue init request, it is safe
	 * to delete the retransmit cache entry.
	 */
	if (client_data->retrans_data)
		retrans_del(client_data);

	/*
	 * Set the appropriate wrap/unwrap routine for RPCSEC_GSS.
	 */
	rqst->rq_xprt->xp_auth.svc_ah_ops = svc_rpc_gss_ops;
	rqst->rq_xprt->xp_auth.svc_ah_private = (caddr_t)client_data;

	/*
	 * Keep copy of parameters we'll need for response, for the
	 * sake of reentrancy (we don't want to look in the context
	 * data because when we are sending a response, another
	 * request may have come in).
	 */
	gss_parms = &rqst->rq_xprt->xp_auth.svc_gss_parms;
	gss_parms->established = client_data->established;
	gss_parms->service = creds.service;
	gss_parms->qop_rcvd = (uint_t)client_data->qop;
	gss_parms->context = (void *)client_data->context;
	gss_parms->seq_num = creds.seq_num;

	/*
	 * Context is already established.  Check verifier, and
	 * note parameters we will need for response in gss_parms.
	 * check_verf() also reports the QOP actually used.
	 */
	if (!check_verf(msg, client_data->context,
	    (int *)&gss_parms->qop_rcvd, client_data->u_cred.uid)) {
		ret = RPCSEC_GSS_NOCRED;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: check verf failed\n");
		goto error2;
	}

	/*
	 * Check and invoke callback if necessary.  This runs once per
	 * context, on the first DATA request, and records the QOP and
	 * service negotiated in the raw credential.
	 */
	if (!client_data->done_docallback) {
		client_data->done_docallback = TRUE;
		client_data->qop = gss_parms->qop_rcvd;
		client_data->raw_cred.qop = gss_parms->qop_rcvd;
		client_data->raw_cred.service = creds.service;
		if (!do_callback(rqst, client_data)) {
			ret = AUTH_FAILED;
			RPCGSS_LOG0(1, "_svc_rpcsec_gss:callback failed\n");
			goto error2;
		}
	}

	/*
	 * If the context was locked, make sure that the client
	 * has not changed QOP.
	 */
	if (client_data->locked && gss_parms->qop_rcvd != client_data->qop) {
		ret = AUTH_BADVERF;
		RPCGSS_LOG0(1, "_svcrpcsec_gss: can not change qop\n");
		goto error2;
	}

	/*
	 * Validate sequence number.
	 */
	if (!check_seq(client_data, creds.seq_num, &client_data->stale)) {
		if (client_data->stale) {
			ret = RPCSEC_GSS_FAILED;
			RPCGSS_LOG0(1,
			    "_svc_rpcsec_gss:check seq failed\n");
		} else {
			RPCGSS_LOG0(4, "_svc_rpcsec_gss:check seq "
			    "failed on good context. Ignoring "
			    "request\n");
			/*
			 * Operational error, drop packet silently.
			 * The client will recover after timing out,
			 * assuming this is a client error and not
			 * a replay attack. Don't dispatch.
			 */
			ret = AUTH_OK;
			*no_dispatch = TRUE;
		}
		goto error2;
	}

	/*
	 * set response verifier: a MIC over this request's sequence
	 * number, proving to the client that we hold the context.
	 */
	if (!set_response_verf(rqst, msg, client_data, creds.seq_num)) {
		ret = RPCSEC_GSS_FAILED;
		client_data->stale = TRUE;
		RPCGSS_LOG0(1,
		    "_svc_rpcsec_gss:set response verifier failed\n");
		goto error2;
	}

	/*
	 * If context is locked, make sure that the client
	 * has not changed the security service.
	 */
	if (client_data->locked &&
	    client_data->raw_cred.service != creds.service) {
		RPCGSS_LOG0(1, "_svc_rpcsec_gss: "
		    "security service changed.\n");
		ret = AUTH_FAILED;
		goto error2;
	}

	/*
	 * Set client credentials to raw credential
	 * structure in context. This is okay, since
	 * this will not change during the lifetime of
	 * the context (so it's MT safe).
	 */
	rqst->rq_clntcred = (char *)&client_data->raw_cred;

	mutex_exit(&client_data->clm);
	return (AUTH_OK);

error2:
	ASSERT(client_data->ref_cnt > 0);
	client_data->ref_cnt--;
	mutex_exit(&client_data->clm);
	return (ret);
}
1399
1400 /*
1401 * Note we don't have a client yet to use this routine and test it.
1402 */
1403 static enum auth_stat
rpcsec_gss_destroy(struct svc_req * rqst,rpc_gss_creds creds,bool_t * no_dispatch)1404 rpcsec_gss_destroy(
1405 struct svc_req *rqst,
1406 rpc_gss_creds creds,
1407 bool_t *no_dispatch)
1408 {
1409 svc_rpc_gss_data *client_data;
1410 int ret;
1411
1412 if (creds.ctx_handle.length == 0) {
1413 RPCGSS_LOG0(1, "_svcrpcsec_gss: no ctx_handle\n");
1414 ret = AUTH_BADCRED;
1415 return (ret);
1416 }
1417 if ((client_data = get_client(&creds.ctx_handle)) == NULL) {
1418 ret = RPCSEC_GSS_NOCRED;
1419 RPCGSS_LOG0(1, "_svcrpcsec_gss: no security context\n");
1420 return (ret);
1421 }
1422
1423 mutex_enter(&client_data->clm);
1424 if (!client_data->established) {
1425 ret = AUTH_FAILED;
1426 goto error2;
1427 }
1428 if (client_data->stale) {
1429 ret = RPCSEC_GSS_NOCRED;
1430 RPCGSS_LOG0(1, "_svcrpcsec_gss: client data stale\n");
1431 goto error2;
1432 }
1433
1434 (void) svc_sendreply(rqst->rq_xprt, xdr_void, NULL);
1435 *no_dispatch = TRUE;
1436 ASSERT(client_data->ref_cnt > 0);
1437 client_data->ref_cnt--;
1438 client_data->stale = TRUE;
1439 mutex_exit(&client_data->clm);
1440 return (AUTH_OK);
1441
1442 error2:
1443 ASSERT(client_data->ref_cnt > 0);
1444 client_data->ref_cnt--;
1445 client_data->stale = TRUE;
1446 mutex_exit(&client_data->clm);
1447 return (ret);
1448 }
1449
1450 /*
1451 * Server side authentication for RPCSEC_GSS.
1452 */
1453 enum auth_stat
__svcrpcsec_gss(struct svc_req * rqst,struct rpc_msg * msg,bool_t * no_dispatch)1454 __svcrpcsec_gss(
1455 struct svc_req *rqst,
1456 struct rpc_msg *msg,
1457 bool_t *no_dispatch)
1458 {
1459 XDR xdrs;
1460 rpc_gss_creds creds;
1461 struct opaque_auth *cred;
1462 int ret;
1463
1464 *no_dispatch = FALSE;
1465
1466 /*
1467 * Initialize response verifier to NULL verifier. If
1468 * necessary, this will be changed later.
1469 */
1470 rqst->rq_xprt->xp_verf.oa_flavor = AUTH_NONE;
1471 rqst->rq_xprt->xp_verf.oa_base = NULL;
1472 rqst->rq_xprt->xp_verf.oa_length = 0;
1473
1474 /*
1475 * Pull out and check credential and verifier.
1476 */
1477 cred = &msg->rm_call.cb_cred;
1478
1479 if (cred->oa_length == 0) {
1480 RPCGSS_LOG0(1, "_svcrpcsec_gss: zero length cred\n");
1481 return (AUTH_BADCRED);
1482 }
1483
1484 xdrmem_create(&xdrs, cred->oa_base, cred->oa_length, XDR_DECODE);
1485 bzero((char *)&creds, sizeof (creds));
1486 if (!__xdr_rpc_gss_creds(&xdrs, &creds)) {
1487 XDR_DESTROY(&xdrs);
1488 RPCGSS_LOG0(1, "_svcrpcsec_gss: can't decode creds\n");
1489 ret = AUTH_BADCRED;
1490 return (AUTH_BADCRED);
1491 }
1492 XDR_DESTROY(&xdrs);
1493
1494 switch (creds.gss_proc) {
1495 case RPCSEC_GSS_INIT:
1496 ret = rpcsec_gss_init(rqst, msg, creds, no_dispatch, NULL);
1497 break;
1498 case RPCSEC_GSS_CONTINUE_INIT:
1499 ret = rpcsec_gss_continue_init(rqst, msg, creds, no_dispatch);
1500 break;
1501 case RPCSEC_GSS_DATA:
1502 ret = rpcsec_gss_data(rqst, msg, creds, no_dispatch);
1503 break;
1504 case RPCSEC_GSS_DESTROY:
1505 ret = rpcsec_gss_destroy(rqst, creds, no_dispatch);
1506 break;
1507 default:
1508 cmn_err(CE_NOTE, "__svcrpcsec_gss: bad proc=%d",
1509 creds.gss_proc);
1510 ret = AUTH_BADCRED;
1511 }
1512
1513 if (creds.ctx_handle.length != 0)
1514 xdr_free(__xdr_rpc_gss_creds, (caddr_t)&creds);
1515 return (ret);
1516 }
1517
/*
 * Check verifier.  The verifier is the checksum (GSS MIC) of the RPC
 * header up to and including the credentials field.  Since the
 * original wire bytes were not kept, the header is rebuilt here from
 * the parsed rpc_msg and then verified with kgss_verify() against the
 * token in the verifier field.  On success *qop_state receives the
 * QOP the checksum was computed with.
 */

/* ARGSUSED */
static bool_t
check_verf(struct rpc_msg *msg, gss_ctx_id_t context, int *qop_state, uid_t uid)
{
	int *buf, *tmp;
	char hdr[128];
	struct opaque_auth *oa;
	int len;
	gss_buffer_desc msg_buf;
	gss_buffer_desc tok_buf;
	OM_uint32 gssstat, minor_stat;

	/*
	 * We have to reconstruct the RPC header from the previously
	 * parsed information, since we haven't kept the header intact.
	 */

	oa = &msg->rm_call.cb_cred;
	if (oa->oa_length > MAX_AUTH_BYTES)
		return (FALSE);

	/* 8 XDR units from the IXDR macro calls. */
	if (sizeof (hdr) < (8 * BYTES_PER_XDR_UNIT +
	    RNDUP(oa->oa_length)))
		return (FALSE);
	buf = (int *)hdr;
	IXDR_PUT_U_INT32(buf, msg->rm_xid);
	IXDR_PUT_ENUM(buf, msg->rm_direction);
	IXDR_PUT_U_INT32(buf, msg->rm_call.cb_rpcvers);
	IXDR_PUT_U_INT32(buf, msg->rm_call.cb_prog);
	IXDR_PUT_U_INT32(buf, msg->rm_call.cb_vers);
	IXDR_PUT_U_INT32(buf, msg->rm_call.cb_proc);
	IXDR_PUT_ENUM(buf, oa->oa_flavor);
	IXDR_PUT_U_INT32(buf, oa->oa_length);
	if (oa->oa_length) {
		/*
		 * Zero the last (round-up) XDR unit first so the
		 * padding bytes are deterministic, then copy in the
		 * opaque credential body.
		 */
		len = RNDUP(oa->oa_length);
		tmp = buf;
		buf += len / sizeof (int);
		*(buf - 1) = 0;
		(void) bcopy(oa->oa_base, (caddr_t)tmp, oa->oa_length);
	}
	/* total length of the reconstructed header */
	len = ((char *)buf) - hdr;
	msg_buf.length = len;
	msg_buf.value = hdr;
	oa = &msg->rm_call.cb_verf;
	tok_buf.length = oa->oa_length;
	tok_buf.value = oa->oa_base;

	gssstat = kgss_verify(&minor_stat, context, &msg_buf, &tok_buf,
	    qop_state);
	if (gssstat != GSS_S_COMPLETE) {
		RPCGSS_LOG(1, "check_verf: kgss_verify status 0x%x\n", gssstat);

		RPCGSS_LOG(4, "check_verf: msg_buf length %d\n", len);
		RPCGSS_LOG(4, "check_verf: msg_buf value 0x%x\n", *(int *)hdr);
		RPCGSS_LOG(4, "check_verf: tok_buf length %ld\n",
		    tok_buf.length);
		RPCGSS_LOG(4, "check_verf: tok_buf value 0x%p\n",
		    (void *)oa->oa_base);
		RPCGSS_LOG(4, "check_verf: context 0x%p\n", (void *)context);

		return (FALSE);
	}
	return (TRUE);
}
1588
1589
1590 /*
1591 * Set response verifier. This is the checksum of the given number.
1592 * (e.g. sequence number or sequence window)
1593 */
1594 static bool_t
set_response_verf(struct svc_req * rqst,struct rpc_msg * msg,svc_rpc_gss_data * cl,uint_t num)1595 set_response_verf(struct svc_req *rqst, struct rpc_msg *msg,
1596 svc_rpc_gss_data *cl, uint_t num)
1597 {
1598 OM_uint32 minor;
1599 gss_buffer_desc in_buf, out_buf;
1600 uint_t num_net;
1601
1602 num_net = (uint_t)htonl(num);
1603 in_buf.length = sizeof (num);
1604 in_buf.value = (char *)&num_net;
1605 /* XXX uid ? */
1606
1607 if ((kgss_sign(&minor, cl->context, cl->qop, &in_buf, &out_buf))
1608 != GSS_S_COMPLETE)
1609 return (FALSE);
1610
1611 rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
1612 rqst->rq_xprt->xp_verf.oa_base = msg->rm_call.cb_verf.oa_base;
1613 rqst->rq_xprt->xp_verf.oa_length = out_buf.length;
1614 bcopy(out_buf.value, rqst->rq_xprt->xp_verf.oa_base, out_buf.length);
1615 (void) gss_release_buffer(&minor, &out_buf);
1616 return (TRUE);
1617 }
1618
/*
 * Create client context.
 *
 * Allocates a cache entry, initializes every field to its
 * "no context yet" state (established == FALSE, one reference held by
 * the caller), then inserts it into the hash table and LRU list under
 * a key drawn from a monotonically advancing 32-bit counter, skipping
 * any value already in use.
 */
static svc_rpc_gss_data *
create_client()
{
	svc_rpc_gss_data *client_data;
	static uint_t key = 1;	/* next handle to try; bumped under ctx_mutex */

	client_data = (svc_rpc_gss_data *) kmem_cache_alloc(svc_data_handle,
	    KM_SLEEP);
	if (client_data == NULL)
		return (NULL);

	/*
	 * set up client data structure
	 */
	client_data->next = NULL;
	client_data->prev = NULL;
	client_data->lru_next = NULL;
	client_data->lru_prev = NULL;
	client_data->client_name.length = 0;
	client_data->client_name.value = NULL;
	client_data->seq_num = 0;
	bzero(client_data->seq_bits, sizeof (client_data->seq_bits));
	client_data->key = 0;
	client_data->cookie = NULL;
	bzero(&client_data->u_cred, sizeof (client_data->u_cred));
	client_data->established = FALSE;
	client_data->locked = FALSE;
	client_data->u_cred_set = 0;
	client_data->context = GSS_C_NO_CONTEXT;
	client_data->expiration = GSS_C_INDEFINITE;
	client_data->deleg = GSS_C_NO_CREDENTIAL;
	client_data->ref_cnt = 1;	/* the caller's reference */
	client_data->last_ref_time = gethrestime_sec();
	client_data->qop = GSS_C_QOP_DEFAULT;
	client_data->done_docallback = FALSE;
	client_data->stale = FALSE;
	client_data->retrans_data = NULL;
	bzero(&client_data->raw_cred, sizeof (client_data->raw_cred));

	/*
	 * The client context handle is a 32-bit key (unsigned int).
	 * The key is incremented until there is no duplicate for it.
	 */

	svc_rpc_gss_cache_stats.total_entries_allocated++;
	mutex_enter(&ctx_mutex);
	for (;;) {
		client_data->key = key++;
		if (find_client(client_data->key) == NULL) {
			insert_client(client_data);
			mutex_exit(&ctx_mutex);
			return (client_data);
		}
	}
	/*NOTREACHED*/
}
1678
1679 /*
1680 * Insert client context into hash list and LRU list.
1681 */
1682 static void
insert_client(svc_rpc_gss_data * client_data)1683 insert_client(svc_rpc_gss_data *client_data)
1684 {
1685 svc_rpc_gss_data *cl;
1686 int index = HASH(client_data->key);
1687
1688 ASSERT(mutex_owned(&ctx_mutex));
1689
1690 client_data->prev = NULL;
1691 cl = clients[index];
1692 if ((client_data->next = cl) != NULL)
1693 cl->prev = client_data;
1694 clients[index] = client_data;
1695
1696 client_data->lru_prev = NULL;
1697 if ((client_data->lru_next = lru_first) != NULL)
1698 lru_first->lru_prev = client_data;
1699 else
1700 lru_last = client_data;
1701 lru_first = client_data;
1702
1703 num_gss_contexts++;
1704 }
1705
/*
 * Fetch a client, given the client context handle.  Move it to the
 * top of the LRU list since this is the most recently used context.
 *
 * On success the entry's reference count is bumped (the caller must
 * drop it) and last_ref_time is refreshed.  Stale entries are never
 * returned; a stale entry with no remaining references is destroyed
 * on the spot.
 */
static svc_rpc_gss_data *
get_client(gss_buffer_t ctx_handle)
{
	uint_t key = *(uint_t *)ctx_handle->value;
	svc_rpc_gss_data *cl;

	mutex_enter(&ctx_mutex);
	if ((cl = find_client(key)) != NULL) {
		mutex_enter(&cl->clm);
		if (cl->stale) {
			if (cl->ref_cnt == 0) {
				/* last user gone: reap it now */
				mutex_exit(&cl->clm);
				destroy_client(cl);
			} else {
				mutex_exit(&cl->clm);
			}
			mutex_exit(&ctx_mutex);
			return (NULL);
		}
		cl->ref_cnt++;
		cl->last_ref_time = gethrestime_sec();
		mutex_exit(&cl->clm);
		/* unlink and reinsert at the MRU end of the LRU list */
		if (cl != lru_first) {
			cl->lru_prev->lru_next = cl->lru_next;
			if (cl->lru_next != NULL)
				cl->lru_next->lru_prev = cl->lru_prev;
			else
				lru_last = cl->lru_prev;
			cl->lru_prev = NULL;
			cl->lru_next = lru_first;
			lru_first->lru_prev = cl;
			lru_first = cl;
		}
	}
	mutex_exit(&ctx_mutex);
	return (cl);
}
1747
1748 /*
1749 * Given the client context handle, find the context corresponding to it.
1750 * Don't change its LRU state since it may not be used.
1751 */
1752 static svc_rpc_gss_data *
find_client(uint_t key)1753 find_client(uint_t key)
1754 {
1755 int index = HASH(key);
1756 svc_rpc_gss_data *cl = NULL;
1757
1758 ASSERT(mutex_owned(&ctx_mutex));
1759
1760 for (cl = clients[index]; cl != NULL; cl = cl->next) {
1761 if (cl->key == key)
1762 break;
1763 }
1764 return (cl);
1765 }
1766
/*
 * Destroy a client context.
 *
 * Unlinks the entry from its hash chain and the LRU list, releases
 * GSS state (security context, client name, raw credential fields,
 * delegated credential), the supplementary group list and the
 * retransmit cache, then returns the entry to the kmem cache.
 * Caller must hold ctx_mutex.
 */
static void
destroy_client(svc_rpc_gss_data *client_data)
{
	OM_uint32 minor;
	int index = HASH(client_data->key);

	ASSERT(mutex_owned(&ctx_mutex));

	/*
	 * remove from hash list
	 */
	if (client_data->prev == NULL)
		clients[index] = client_data->next;
	else
		client_data->prev->next = client_data->next;
	if (client_data->next != NULL)
		client_data->next->prev = client_data->prev;

	/*
	 * remove from LRU list
	 */
	if (client_data->lru_prev == NULL)
		lru_first = client_data->lru_next;
	else
		client_data->lru_prev->lru_next = client_data->lru_next;
	if (client_data->lru_next != NULL)
		client_data->lru_next->lru_prev = client_data->lru_prev;
	else
		lru_last = client_data->lru_prev;

	/*
	 * If there is a GSS context, clean up GSS state.
	 * NOTE(review): common_client_data_free() runs only inside
	 * this branch, i.e. an entry that never acquired a context is
	 * assumed never to have had name/credential fields populated
	 * either -- confirm against the taskq accept path.
	 */
	if (client_data->context != GSS_C_NO_CONTEXT) {
		(void) kgss_delete_sec_context(&minor, &client_data->context,
		    NULL);

		common_client_data_free(client_data);

		if (client_data->deleg != GSS_C_NO_CREDENTIAL) {
			(void) kgss_release_cred(&minor, &client_data->deleg,
			    crgetuid(CRED()));
		}
	}

	if (client_data->u_cred.gidlist != NULL) {
		kmem_free((char *)client_data->u_cred.gidlist,
		    client_data->u_cred.gidlen * sizeof (gid_t));
		client_data->u_cred.gidlist = NULL;
	}
	if (client_data->retrans_data != NULL)
		retrans_del(client_data);

	kmem_cache_free(svc_data_handle, client_data);
	num_gss_contexts--;
}
1826
/*
 * Check for expired and stale client contexts.
 *
 * Walks the LRU list from the cold end, marking stale -- and, when
 * the reference count is zero, destroying -- any context that has
 * expired, was already stale, or has been idle past the applicable
 * threshold.  Reclaim sweeps (from_reclaim) use the shorter "active"
 * idle delta.  Caller must hold ctx_mutex.
 */
static void
sweep_clients(bool_t from_reclaim)
{
	svc_rpc_gss_data *cl, *next;
	time_t last_reference_needed;
	time_t now = gethrestime_sec();

	ASSERT(mutex_owned(&ctx_mutex));

	last_reference_needed = now - (from_reclaim ?
	    svc_rpc_gss_active_delta : svc_rpc_gss_inactive_delta);

	cl = lru_last;
	while (cl) {
		/*
		 * We assume here that any manipulation of the LRU pointers
		 * and hash bucket pointers are only done when holding the
		 * ctx_mutex.
		 */
		next = cl->lru_prev;

		mutex_enter(&cl->clm);

		/* candidate: expired, already stale, or idle too long */
		if ((cl->expiration != GSS_C_INDEFINITE &&
		    cl->expiration <= now) || cl->stale ||
		    cl->last_ref_time <= last_reference_needed) {

			/*
			 * An idle-only candidate is condemned only when
			 * nothing still references it; expired or stale
			 * entries are condemned unconditionally.
			 */
			if ((cl->expiration != GSS_C_INDEFINITE &&
			    cl->expiration <= now) || cl->stale ||
			    (cl->last_ref_time <= last_reference_needed &&
			    cl->ref_cnt == 0)) {

				cl->stale = TRUE;

				if (cl->ref_cnt == 0) {
					mutex_exit(&cl->clm);
					if (from_reclaim)
						svc_rpc_gss_cache_stats.
						    no_returned_by_reclaim++;
					destroy_client(cl);
				} else
					mutex_exit(&cl->clm);
			} else
				mutex_exit(&cl->clm);
		} else
			mutex_exit(&cl->clm);

		cl = next;
	}

	last_swept = gethrestime_sec();
}
1882
1883 /*
1884 * Encrypt the serialized arguments from xdr_func applied to xdr_ptr
1885 * and write the result to xdrs.
1886 */
1887 static bool_t
svc_rpc_gss_wrap(SVCAUTH * auth,XDR * out_xdrs,bool_t (* xdr_func)(),caddr_t xdr_ptr)1888 svc_rpc_gss_wrap(SVCAUTH *auth, XDR *out_xdrs, bool_t (*xdr_func)(),
1889 caddr_t xdr_ptr)
1890 {
1891 svc_rpc_gss_parms_t *gss_parms = SVCAUTH_GSSPARMS(auth);
1892 bool_t ret;
1893
1894 /*
1895 * If context is not established, or if neither integrity nor
1896 * privacy service is used, don't wrap - just XDR encode.
1897 * Otherwise, wrap data using service and QOP parameters.
1898 */
1899 if (!gss_parms->established || gss_parms->service == rpc_gss_svc_none)
1900 return ((*xdr_func)(out_xdrs, xdr_ptr));
1901
1902 ret = __rpc_gss_wrap_data(gss_parms->service,
1903 (OM_uint32)gss_parms->qop_rcvd,
1904 (gss_ctx_id_t)gss_parms->context,
1905 gss_parms->seq_num,
1906 out_xdrs, xdr_func, xdr_ptr);
1907 return (ret);
1908 }
1909
1910 /*
1911 * Decrypt the serialized arguments and XDR decode them.
1912 */
1913 static bool_t
svc_rpc_gss_unwrap(SVCAUTH * auth,XDR * in_xdrs,bool_t (* xdr_func)(),caddr_t xdr_ptr)1914 svc_rpc_gss_unwrap(SVCAUTH *auth, XDR *in_xdrs, bool_t (*xdr_func)(),
1915 caddr_t xdr_ptr)
1916 {
1917 svc_rpc_gss_parms_t *gss_parms = SVCAUTH_GSSPARMS(auth);
1918
1919 /*
1920 * If context is not established, or if neither integrity nor
1921 * privacy service is used, don't unwrap - just XDR decode.
1922 * Otherwise, unwrap data.
1923 */
1924 if (!gss_parms->established || gss_parms->service == rpc_gss_svc_none)
1925 return ((*xdr_func)(in_xdrs, xdr_ptr));
1926
1927 return (__rpc_gss_unwrap_data(gss_parms->service,
1928 (gss_ctx_id_t)gss_parms->context,
1929 gss_parms->seq_num,
1930 gss_parms->qop_rcvd,
1931 in_xdrs, xdr_func, xdr_ptr));
1932 }
1933
1934
/* ARGSUSED */
int
rpc_gss_svc_max_data_length(struct svc_req *req, int max_tp_unit_len)
{
	/* Not implemented in the kernel; always reports 0. */
	return (0);
}
1941
1942 /*
1943 * Add retransmit entry to the context cache entry for a new xid.
1944 * If there is already an entry, delete it before adding the new one.
1945 */
retrans_add(client,xid,result)1946 static void retrans_add(client, xid, result)
1947 svc_rpc_gss_data *client;
1948 uint32_t xid;
1949 rpc_gss_init_res *result;
1950 {
1951 retrans_entry *rdata;
1952
1953 if (client->retrans_data && client->retrans_data->xid == xid)
1954 return;
1955
1956 rdata = kmem_zalloc(sizeof (*rdata), KM_SLEEP);
1957
1958 if (rdata == NULL)
1959 return;
1960
1961 rdata->xid = xid;
1962 rdata->result = *result;
1963
1964 if (result->token.length != 0) {
1965 GSS_DUP_BUFFER(rdata->result.token, result->token);
1966 }
1967
1968 if (client->retrans_data)
1969 retrans_del(client);
1970
1971 client->retrans_data = rdata;
1972 }
1973
1974 /*
1975 * Delete the retransmit data from the context cache entry.
1976 */
retrans_del(client)1977 static void retrans_del(client)
1978 svc_rpc_gss_data *client;
1979 {
1980 retrans_entry *rdata;
1981 OM_uint32 minor_stat;
1982
1983 if (client->retrans_data == NULL)
1984 return;
1985
1986 rdata = client->retrans_data;
1987 if (rdata->result.token.length != 0) {
1988 (void) gss_release_buffer(&minor_stat, &rdata->result.token);
1989 }
1990
1991 kmem_free((caddr_t)rdata, sizeof (*rdata));
1992 client->retrans_data = NULL;
1993 }
1994
/*
 * This function frees the following fields of svc_rpc_gss_data:
 * client_name, raw_cred.client_principal, raw_cred.mechanism.
 * The principal and mechanism pointers are NULLed after freeing so a
 * repeat call is harmless.
 */
static void
common_client_data_free(svc_rpc_gss_data *client_data)
{
	if (client_data->client_name.length > 0) {
		(void) gss_release_buffer(NULL, &client_data->client_name);
	}

	if (client_data->raw_cred.client_principal) {
		/* allocated as one chunk: principal header plus name bytes */
		kmem_free((caddr_t)client_data->raw_cred.client_principal,
		    client_data->raw_cred.client_principal->len +
		    sizeof (int));
		client_data->raw_cred.client_principal = NULL;
	}

	/*
	 * In the user GSS-API library, mechanism (mech_type returned
	 * by gss_accept_sec_context) is static storage, however
	 * since all the work is done for gss_accept_sec_context under
	 * gssd, what is returned in the kernel, is a copy from the oid
	 * obtained under from gssd, so need to free it when destroying
	 * the client data.
	 */

	if (client_data->raw_cred.mechanism) {
		kgss_free_oid(client_data->raw_cred.mechanism);
		client_data->raw_cred.mechanism = NULL;
	}
}
2027