/*
 *  linux/net/sunrpc/auth_gss/auth_gss.c
 *
 *  RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 16);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named
	 * after the mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
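	 *
	 * The remainder of the downcall, as parsed below, is the sequence
	 * window size, the opaque wire context handle (an XDR netobj), and
	 * a length-prefixed, mechanism-specific blob that is handed to
	 * gss_import_sec_context().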
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);
	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
}

static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   mech->gm_name,
				   from_kuid(&init_user_ns, gss_msg->uid));
	p += gss_msg->msg.len;
	if (target_name) {
		len = sprintf(p, "target=%s ", target_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = sprintf(p, "service=%s ", service_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version(gss_auth->net);
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	switch (vers) {
	case 0:
		gss_encode_v0_msg(gss_msg);
		break;
	default:
		gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
	}
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
				"Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh.
*/ 544 warn_gssd(); 545 task->tk_timeout = 15*HZ; 546 rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); 547 return -EAGAIN; 548 } 549 if (IS_ERR(gss_msg)) { 550 err = PTR_ERR(gss_msg); 551 goto out; 552 } 553 pipe = gss_msg->pipe; 554 spin_lock(&pipe->lock); 555 if (gss_cred->gc_upcall != NULL) 556 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); 557 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { 558 task->tk_timeout = 0; 559 gss_cred->gc_upcall = gss_msg; 560 /* gss_upcall_callback will release the reference to gss_upcall_msg */ 561 atomic_inc(&gss_msg->count); 562 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); 563 } else { 564 gss_handle_downcall_result(gss_cred, gss_msg); 565 err = gss_msg->msg.errno; 566 } 567 spin_unlock(&pipe->lock); 568 gss_release_msg(gss_msg); 569 out: 570 dprintk("RPC: %5u %s for uid %u result %d\n", 571 task->tk_pid, __func__, 572 from_kuid(&init_user_ns, cred->cr_uid), err); 573 return err; 574 } 575 576 static inline int 577 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 578 { 579 struct net *net = gss_auth->net; 580 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 581 struct rpc_pipe *pipe; 582 struct rpc_cred *cred = &gss_cred->gc_base; 583 struct gss_upcall_msg *gss_msg; 584 unsigned long timeout; 585 DEFINE_WAIT(wait); 586 int err; 587 588 dprintk("RPC: %s for uid %u\n", 589 __func__, from_kuid(&init_user_ns, cred->cr_uid)); 590 retry: 591 err = 0; 592 /* Default timeout is 15s unless we know that gssd is not running */ 593 timeout = 15 * HZ; 594 if (!sn->gssd_running) 595 timeout = HZ >> 2; 596 gss_msg = gss_setup_upcall(gss_auth, cred); 597 if (PTR_ERR(gss_msg) == -EAGAIN) { 598 err = wait_event_interruptible_timeout(pipe_version_waitqueue, 599 sn->pipe_version >= 0, timeout); 600 if (sn->pipe_version < 0) { 601 if (err == 0) 602 sn->gssd_running = 0; 603 warn_gssd(); 604 err = -EACCES; 605 } 606 if (err < 0) 607 goto out; 608 goto retry; 609 } 610 if (IS_ERR(gss_msg)) { 611 err = PTR_ERR(gss_msg); 612 goto out; 613 } 614 pipe = gss_msg->pipe; 615 for (;;) { 616 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); 617 spin_lock(&pipe->lock); 618 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 619 break; 620 } 621 spin_unlock(&pipe->lock); 622 if (fatal_signal_pending(current)) { 623 err = -ERESTARTSYS; 624 goto out_intr; 625 } 626 schedule(); 627 } 628 if (gss_msg->ctx) 629 gss_cred_set_ctx(cred, gss_msg->ctx); 630 else 631 err = gss_msg->msg.errno; 632 spin_unlock(&pipe->lock); 633 out_intr: 634 finish_wait(&gss_msg->waitqueue, &wait); 635 gss_release_msg(gss_msg); 636 out: 637 dprintk("RPC: %s for uid %u result %d\n", 638 __func__, from_kuid(&init_user_ns, cred->cr_uid), err); 639 return err; 640 } 641 642 #define MSG_BUF_MAXSIZE 1024 643 644 static ssize_t 645 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) 646 { 647 const void *p, *end; 648 void *buf; 649 struct gss_upcall_msg *gss_msg; 650 struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe; 651 struct gss_cl_ctx *ctx; 652 uid_t id; 653 kuid_t uid; 654 ssize_t err = -EFBIG; 655 656 if (mlen > MSG_BUF_MAXSIZE) 657 goto out; 658 err = -ENOMEM; 659 buf = kmalloc(mlen, GFP_NOFS); 660 if (!buf) 661 goto out; 662 663 err = -EFAULT; 664 if (copy_from_user(buf, src, mlen)) 665 goto err; 666 667 end = (const void *)((char *)buf + mlen); 668 p = simple_get_bytes(buf, end, &id, sizeof(id)); 669 if (IS_ERR(p)) { 670 err = PTR_ERR(p); 671 goto err; 672 } 673 674 uid = 
make_kuid(&init_user_ns, id); 675 if (!uid_valid(uid)) { 676 err = -EINVAL; 677 goto err; 678 } 679 680 err = -ENOMEM; 681 ctx = gss_alloc_context(); 682 if (ctx == NULL) 683 goto err; 684 685 err = -ENOENT; 686 /* Find a matching upcall */ 687 spin_lock(&pipe->lock); 688 gss_msg = __gss_find_upcall(pipe, uid); 689 if (gss_msg == NULL) { 690 spin_unlock(&pipe->lock); 691 goto err_put_ctx; 692 } 693 list_del_init(&gss_msg->list); 694 spin_unlock(&pipe->lock); 695 696 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 697 if (IS_ERR(p)) { 698 err = PTR_ERR(p); 699 switch (err) { 700 case -EACCES: 701 case -EKEYEXPIRED: 702 gss_msg->msg.errno = err; 703 err = mlen; 704 break; 705 case -EFAULT: 706 case -ENOMEM: 707 case -EINVAL: 708 case -ENOSYS: 709 gss_msg->msg.errno = -EAGAIN; 710 break; 711 default: 712 printk(KERN_CRIT "%s: bad return from " 713 "gss_fill_context: %zd\n", __func__, err); 714 BUG(); 715 } 716 goto err_release_msg; 717 } 718 gss_msg->ctx = gss_get_ctx(ctx); 719 err = mlen; 720 721 err_release_msg: 722 spin_lock(&pipe->lock); 723 __gss_unhash_msg(gss_msg); 724 spin_unlock(&pipe->lock); 725 gss_release_msg(gss_msg); 726 err_put_ctx: 727 gss_put_ctx(ctx); 728 err: 729 kfree(buf); 730 out: 731 dprintk("RPC: %s returning %Zd\n", __func__, err); 732 return err; 733 } 734 735 static int gss_pipe_open(struct inode *inode, int new_version) 736 { 737 struct net *net = inode->i_sb->s_fs_info; 738 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 739 int ret = 0; 740 741 spin_lock(&pipe_version_lock); 742 if (sn->pipe_version < 0) { 743 /* First open of any gss pipe determines the version: */ 744 sn->pipe_version = new_version; 745 rpc_wake_up(&pipe_version_rpc_waitqueue); 746 wake_up(&pipe_version_waitqueue); 747 } else if (sn->pipe_version != new_version) { 748 /* Trying to open a pipe of a different version */ 749 ret = -EBUSY; 750 goto out; 751 } 752 atomic_inc(&sn->pipe_users); 753 out: 754 spin_unlock(&pipe_version_lock); 755 return ret; 756 757 } 758 759 static int gss_pipe_open_v0(struct inode *inode) 760 { 761 return gss_pipe_open(inode, 0); 762 } 763 764 static int gss_pipe_open_v1(struct inode *inode) 765 { 766 return gss_pipe_open(inode, 1); 767 } 768 769 static void 770 gss_pipe_release(struct inode *inode) 771 { 772 struct net *net = inode->i_sb->s_fs_info; 773 struct rpc_pipe *pipe = RPC_I(inode)->pipe; 774 struct gss_upcall_msg *gss_msg; 775 776 restart: 777 spin_lock(&pipe->lock); 778 list_for_each_entry(gss_msg, &pipe->in_downcall, list) { 779 780 if (!list_empty(&gss_msg->msg.list)) 781 continue; 782 gss_msg->msg.errno = -EPIPE; 783 atomic_inc(&gss_msg->count); 784 __gss_unhash_msg(gss_msg); 785 spin_unlock(&pipe->lock); 786 gss_release_msg(gss_msg); 787 goto restart; 788 } 789 spin_unlock(&pipe->lock); 790 791 put_pipe_version(net); 792 } 793 794 static void 795 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) 796 { 797 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); 798 799 if (msg->errno < 0) { 800 dprintk("RPC: %s releasing msg %p\n", 801 __func__, gss_msg); 802 atomic_inc(&gss_msg->count); 803 gss_unhash_msg(gss_msg); 804 if (msg->errno == -ETIMEDOUT) 805 warn_gssd(); 806 gss_release_msg(gss_msg); 807 } 808 } 809 810 static void gss_pipe_dentry_destroy(struct dentry *dir, 811 struct rpc_pipe_dir_object *pdo) 812 { 813 struct gss_pipe *gss_pipe = pdo->pdo_data; 814 struct rpc_pipe *pipe = gss_pipe->pipe; 815 816 if (pipe->dentry != NULL) { 817 rpc_unlink(pipe->dentry); 818 pipe->dentry = NULL; 819 } 820 } 821 822 static 
int gss_pipe_dentry_create(struct dentry *dir, 823 struct rpc_pipe_dir_object *pdo) 824 { 825 struct gss_pipe *p = pdo->pdo_data; 826 struct dentry *dentry; 827 828 dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe); 829 if (IS_ERR(dentry)) 830 return PTR_ERR(dentry); 831 p->pipe->dentry = dentry; 832 return 0; 833 } 834 835 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = { 836 .create = gss_pipe_dentry_create, 837 .destroy = gss_pipe_dentry_destroy, 838 }; 839 840 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt, 841 const char *name, 842 const struct rpc_pipe_ops *upcall_ops) 843 { 844 struct gss_pipe *p; 845 int err = -ENOMEM; 846 847 p = kmalloc(sizeof(*p), GFP_KERNEL); 848 if (p == NULL) 849 goto err; 850 p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); 851 if (IS_ERR(p->pipe)) { 852 err = PTR_ERR(p->pipe); 853 goto err_free_gss_pipe; 854 } 855 p->name = name; 856 p->clnt = clnt; 857 kref_init(&p->kref); 858 rpc_init_pipe_dir_object(&p->pdo, 859 &gss_pipe_dir_object_ops, 860 p); 861 return p; 862 err_free_gss_pipe: 863 kfree(p); 864 err: 865 return ERR_PTR(err); 866 } 867 868 struct gss_alloc_pdo { 869 struct rpc_clnt *clnt; 870 const char *name; 871 const struct rpc_pipe_ops *upcall_ops; 872 }; 873 874 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data) 875 { 876 struct gss_pipe *gss_pipe; 877 struct gss_alloc_pdo *args = data; 878 879 if (pdo->pdo_ops != &gss_pipe_dir_object_ops) 880 return 0; 881 gss_pipe = container_of(pdo, struct gss_pipe, pdo); 882 if (strcmp(gss_pipe->name, args->name) != 0) 883 return 0; 884 if (!kref_get_unless_zero(&gss_pipe->kref)) 885 return 0; 886 return 1; 887 } 888 889 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data) 890 { 891 struct gss_pipe *gss_pipe; 892 struct gss_alloc_pdo *args = data; 893 894 gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops); 895 if (!IS_ERR(gss_pipe)) 896 return &gss_pipe->pdo; 897 return NULL; 898 } 899 900 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt, 901 const char *name, 902 const struct rpc_pipe_ops *upcall_ops) 903 { 904 struct net *net = rpc_net_ns(clnt); 905 struct rpc_pipe_dir_object *pdo; 906 struct gss_alloc_pdo args = { 907 .clnt = clnt, 908 .name = name, 909 .upcall_ops = upcall_ops, 910 }; 911 912 pdo = rpc_find_or_alloc_pipe_dir_object(net, 913 &clnt->cl_pipedir_objects, 914 gss_pipe_match_pdo, 915 gss_pipe_alloc_pdo, 916 &args); 917 if (pdo != NULL) 918 return container_of(pdo, struct gss_pipe, pdo); 919 return ERR_PTR(-ENOMEM); 920 } 921 922 static void __gss_pipe_free(struct gss_pipe *p) 923 { 924 struct rpc_clnt *clnt = p->clnt; 925 struct net *net = rpc_net_ns(clnt); 926 927 rpc_remove_pipe_dir_object(net, 928 &clnt->cl_pipedir_objects, 929 &p->pdo); 930 rpc_destroy_pipe_data(p->pipe); 931 kfree(p); 932 } 933 934 static void __gss_pipe_release(struct kref *kref) 935 { 936 struct gss_pipe *p = container_of(kref, struct gss_pipe, kref); 937 938 __gss_pipe_free(p); 939 } 940 941 static void gss_pipe_free(struct gss_pipe *p) 942 { 943 if (p != NULL) 944 kref_put(&p->kref, __gss_pipe_release); 945 } 946 947 /* 948 * NOTE: we have the opportunity to use different 949 * parameters based on the input flavor (which must be a pseudoflavor) 950 */ 951 static struct gss_auth * 952 gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 953 { 954 rpc_authflavor_t flavor = args->pseudoflavor; 955 struct gss_auth *gss_auth; 956 struct gss_pipe *gss_pipe; 957 struct rpc_auth * auth; 
958 int err = -ENOMEM; /* XXX? */ 959 960 dprintk("RPC: creating GSS authenticator for client %p\n", clnt); 961 962 if (!try_module_get(THIS_MODULE)) 963 return ERR_PTR(err); 964 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL))) 965 goto out_dec; 966 INIT_HLIST_NODE(&gss_auth->hash); 967 gss_auth->target_name = NULL; 968 if (args->target_name) { 969 gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL); 970 if (gss_auth->target_name == NULL) 971 goto err_free; 972 } 973 gss_auth->client = clnt; 974 gss_auth->net = get_net(rpc_net_ns(clnt)); 975 err = -EINVAL; 976 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); 977 if (!gss_auth->mech) { 978 dprintk("RPC: Pseudoflavor %d not found!\n", flavor); 979 goto err_put_net; 980 } 981 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 982 if (gss_auth->service == 0) 983 goto err_put_mech; 984 auth = &gss_auth->rpc_auth; 985 auth->au_cslack = GSS_CRED_SLACK >> 2; 986 auth->au_rslack = GSS_VERF_SLACK >> 2; 987 auth->au_ops = &authgss_ops; 988 auth->au_flavor = flavor; 989 atomic_set(&auth->au_count, 1); 990 kref_init(&gss_auth->kref); 991 992 err = rpcauth_init_credcache(auth); 993 if (err) 994 goto err_put_mech; 995 /* 996 * Note: if we created the old pipe first, then someone who 997 * examined the directory at the right moment might conclude 998 * that we supported only the old pipe. So we instead create 999 * the new pipe first. 1000 */ 1001 gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1); 1002 if (IS_ERR(gss_pipe)) { 1003 err = PTR_ERR(gss_pipe); 1004 goto err_destroy_credcache; 1005 } 1006 gss_auth->gss_pipe[1] = gss_pipe; 1007 1008 gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name, 1009 &gss_upcall_ops_v0); 1010 if (IS_ERR(gss_pipe)) { 1011 err = PTR_ERR(gss_pipe); 1012 goto err_destroy_pipe_1; 1013 } 1014 gss_auth->gss_pipe[0] = gss_pipe; 1015 1016 return gss_auth; 1017 err_destroy_pipe_1: 1018 gss_pipe_free(gss_auth->gss_pipe[1]); 1019 err_destroy_credcache: 1020 rpcauth_destroy_credcache(auth); 1021 err_put_mech: 1022 gss_mech_put(gss_auth->mech); 1023 err_put_net: 1024 put_net(gss_auth->net); 1025 err_free: 1026 kfree(gss_auth->target_name); 1027 kfree(gss_auth); 1028 out_dec: 1029 module_put(THIS_MODULE); 1030 return ERR_PTR(err); 1031 } 1032 1033 static void 1034 gss_free(struct gss_auth *gss_auth) 1035 { 1036 gss_pipe_free(gss_auth->gss_pipe[0]); 1037 gss_pipe_free(gss_auth->gss_pipe[1]); 1038 gss_mech_put(gss_auth->mech); 1039 put_net(gss_auth->net); 1040 kfree(gss_auth->target_name); 1041 1042 kfree(gss_auth); 1043 module_put(THIS_MODULE); 1044 } 1045 1046 static void 1047 gss_free_callback(struct kref *kref) 1048 { 1049 struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); 1050 1051 gss_free(gss_auth); 1052 } 1053 1054 static void 1055 gss_destroy(struct rpc_auth *auth) 1056 { 1057 struct gss_auth *gss_auth = container_of(auth, 1058 struct gss_auth, rpc_auth); 1059 1060 dprintk("RPC: destroying GSS authenticator %p flavor %d\n", 1061 auth, auth->au_flavor); 1062 1063 if (hash_hashed(&gss_auth->hash)) { 1064 spin_lock(&gss_auth_hash_lock); 1065 hash_del(&gss_auth->hash); 1066 spin_unlock(&gss_auth_hash_lock); 1067 } 1068 1069 gss_pipe_free(gss_auth->gss_pipe[0]); 1070 gss_auth->gss_pipe[0] = NULL; 1071 gss_pipe_free(gss_auth->gss_pipe[1]); 1072 gss_auth->gss_pipe[1] = NULL; 1073 rpcauth_destroy_credcache(auth); 1074 1075 kref_put(&gss_auth->kref, gss_free_callback); 1076 } 1077 1078 static struct gss_auth * 1079 gss_auth_find_or_add_hashed(struct 
rpc_auth_create_args *args, 1080 struct rpc_clnt *clnt, 1081 struct gss_auth *new) 1082 { 1083 struct gss_auth *gss_auth; 1084 unsigned long hashval = (unsigned long)clnt; 1085 1086 spin_lock(&gss_auth_hash_lock); 1087 hash_for_each_possible(gss_auth_hash_table, 1088 gss_auth, 1089 hash, 1090 hashval) { 1091 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor) 1092 continue; 1093 if (gss_auth->target_name != args->target_name) { 1094 if (gss_auth->target_name == NULL) 1095 continue; 1096 if (args->target_name == NULL) 1097 continue; 1098 if (strcmp(gss_auth->target_name, args->target_name)) 1099 continue; 1100 } 1101 if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count)) 1102 continue; 1103 goto out; 1104 } 1105 if (new) 1106 hash_add(gss_auth_hash_table, &new->hash, hashval); 1107 gss_auth = new; 1108 out: 1109 spin_unlock(&gss_auth_hash_lock); 1110 return gss_auth; 1111 } 1112 1113 static struct gss_auth * 1114 gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1115 { 1116 struct gss_auth *gss_auth; 1117 struct gss_auth *new; 1118 1119 gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL); 1120 if (gss_auth != NULL) 1121 goto out; 1122 new = gss_create_new(args, clnt); 1123 if (IS_ERR(new)) 1124 return new; 1125 gss_auth = gss_auth_find_or_add_hashed(args, clnt, new); 1126 if (gss_auth != new) 1127 gss_destroy(&new->rpc_auth); 1128 out: 1129 return gss_auth; 1130 } 1131 1132 static struct rpc_auth * 1133 gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1134 { 1135 struct gss_auth *gss_auth; 1136 struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt); 1137 1138 while (clnt != clnt->cl_parent) { 1139 struct rpc_clnt *parent = clnt->cl_parent; 1140 /* Find the original parent for this transport */ 1141 if (rcu_access_pointer(parent->cl_xprt) != xprt) 1142 break; 1143 clnt = parent; 1144 } 1145 1146 gss_auth = gss_create_hashed(args, clnt); 1147 if (IS_ERR(gss_auth)) 1148 return ERR_CAST(gss_auth); 1149 return &gss_auth->rpc_auth; 1150 } 1151 1152 /* 1153 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call 1154 * to the server with the GSS control procedure field set to 1155 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release 1156 * all RPCSEC_GSS state associated with that context. 1157 */ 1158 static int 1159 gss_destroying_context(struct rpc_cred *cred) 1160 { 1161 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1162 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 1163 struct rpc_task *task; 1164 1165 if (gss_cred->gc_ctx == NULL || 1166 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 1167 return 0; 1168 1169 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; 1170 cred->cr_ops = &gss_nullops; 1171 1172 /* Take a reference to ensure the cred will be destroyed either 1173 * by the RPC call or by the put_rpccred() below */ 1174 get_rpccred(cred); 1175 1176 task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT); 1177 if (!IS_ERR(task)) 1178 rpc_put_task(task); 1179 1180 put_rpccred(cred); 1181 return 1; 1182 } 1183 1184 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure 1185 * to create a new cred or context, so they check that things have been 1186 * allocated before freeing them. 
*/ 1187 static void 1188 gss_do_free_ctx(struct gss_cl_ctx *ctx) 1189 { 1190 dprintk("RPC: %s\n", __func__); 1191 1192 gss_delete_sec_context(&ctx->gc_gss_ctx); 1193 kfree(ctx->gc_wire_ctx.data); 1194 kfree(ctx); 1195 } 1196 1197 static void 1198 gss_free_ctx_callback(struct rcu_head *head) 1199 { 1200 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); 1201 gss_do_free_ctx(ctx); 1202 } 1203 1204 static void 1205 gss_free_ctx(struct gss_cl_ctx *ctx) 1206 { 1207 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); 1208 } 1209 1210 static void 1211 gss_free_cred(struct gss_cred *gss_cred) 1212 { 1213 dprintk("RPC: %s cred=%p\n", __func__, gss_cred); 1214 kfree(gss_cred); 1215 } 1216 1217 static void 1218 gss_free_cred_callback(struct rcu_head *head) 1219 { 1220 struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); 1221 gss_free_cred(gss_cred); 1222 } 1223 1224 static void 1225 gss_destroy_nullcred(struct rpc_cred *cred) 1226 { 1227 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1228 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 1229 struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 1230 1231 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL); 1232 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 1233 if (ctx) 1234 gss_put_ctx(ctx); 1235 kref_put(&gss_auth->kref, gss_free_callback); 1236 } 1237 1238 static void 1239 gss_destroy_cred(struct rpc_cred *cred) 1240 { 1241 1242 if (gss_destroying_context(cred)) 1243 return; 1244 gss_destroy_nullcred(cred); 1245 } 1246 1247 /* 1248 * Lookup RPCSEC_GSS cred for the current process 1249 */ 1250 static struct rpc_cred * 1251 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) 1252 { 1253 return rpcauth_lookup_credcache(auth, acred, flags); 1254 } 1255 1256 static struct rpc_cred * 1257 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) 1258 { 1259 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); 1260 struct gss_cred *cred = NULL; 1261 int err = -ENOMEM; 1262 1263 dprintk("RPC: %s for uid %d, flavor %d\n", 1264 __func__, from_kuid(&init_user_ns, acred->uid), 1265 auth->au_flavor); 1266 1267 if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) 1268 goto out_err; 1269 1270 rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); 1271 /* 1272 * Note: in order to force a call to call_refresh(), we deliberately 1273 * fail to flag the credential as RPCAUTH_CRED_UPTODATE. 
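	 * Leaving only RPCAUTH_CRED_NEW set means that gss_refresh() falls
	 * through to gss_refresh_upcall() and asks gssd for a context
	 * before the credential is first used.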
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

/*
 * Returns -EACCES if the GSS context is NULL or will expire within the
 * timeout (gss_key_expire_timeo, in seconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	unsigned long now = jiffies;
	unsigned long expire;

	if (gss_cred->gc_ctx == NULL)
		return -EACCES;

	expire = gss_cred->gc_ctx->gc_expiry - (gss_key_expire_timeo * HZ);

	if (time_after(now, expire))
		return -EACCES;
	return 0;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
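 *
 * The credential body built below follows the rpc_gss_cred_t layout from
 * RFC 2203: GSS version, control procedure, sequence number, service level
 * and the opaque context handle.  The verifier that follows it is an
 * RPC_AUTH_GSS verifier whose body is a MIC computed over the RPC header,
 * from the xid through the end of the credential.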
1364 */ 1365 static __be32 * 1366 gss_marshal(struct rpc_task *task, __be32 *p) 1367 { 1368 struct rpc_rqst *req = task->tk_rqstp; 1369 struct rpc_cred *cred = req->rq_cred; 1370 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1371 gc_base); 1372 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1373 __be32 *cred_len; 1374 u32 maj_stat = 0; 1375 struct xdr_netobj mic; 1376 struct kvec iov; 1377 struct xdr_buf verf_buf; 1378 1379 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1380 1381 *p++ = htonl(RPC_AUTH_GSS); 1382 cred_len = p++; 1383 1384 spin_lock(&ctx->gc_seq_lock); 1385 req->rq_seqno = ctx->gc_seq++; 1386 spin_unlock(&ctx->gc_seq_lock); 1387 1388 *p++ = htonl((u32) RPC_GSS_VERSION); 1389 *p++ = htonl((u32) ctx->gc_proc); 1390 *p++ = htonl((u32) req->rq_seqno); 1391 *p++ = htonl((u32) gss_cred->gc_service); 1392 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx); 1393 *cred_len = htonl((p - (cred_len + 1)) << 2); 1394 1395 /* We compute the checksum for the verifier over the xdr-encoded bytes 1396 * starting with the xid and ending at the end of the credential: */ 1397 iov.iov_base = xprt_skip_transport_header(req->rq_xprt, 1398 req->rq_snd_buf.head[0].iov_base); 1399 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; 1400 xdr_buf_from_iov(&iov, &verf_buf); 1401 1402 /* set verifier flavor*/ 1403 *p++ = htonl(RPC_AUTH_GSS); 1404 1405 mic.data = (u8 *)(p + 1); 1406 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1407 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 1408 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1409 } else if (maj_stat != 0) { 1410 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1411 goto out_put_ctx; 1412 } 1413 p = xdr_encode_opaque(p, NULL, mic.len); 1414 gss_put_ctx(ctx); 1415 return p; 1416 out_put_ctx: 1417 gss_put_ctx(ctx); 1418 return NULL; 1419 } 1420 1421 static int gss_renew_cred(struct rpc_task *task) 1422 { 1423 struct rpc_cred *oldcred = task->tk_rqstp->rq_cred; 1424 struct gss_cred *gss_cred = container_of(oldcred, 1425 struct gss_cred, 1426 gc_base); 1427 struct rpc_auth *auth = oldcred->cr_auth; 1428 struct auth_cred acred = { 1429 .uid = oldcred->cr_uid, 1430 .principal = gss_cred->gc_principal, 1431 .machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0), 1432 }; 1433 struct rpc_cred *new; 1434 1435 new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); 1436 if (IS_ERR(new)) 1437 return PTR_ERR(new); 1438 task->tk_rqstp->rq_cred = new; 1439 put_rpccred(oldcred); 1440 return 0; 1441 } 1442 1443 static int gss_cred_is_negative_entry(struct rpc_cred *cred) 1444 { 1445 if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { 1446 unsigned long now = jiffies; 1447 unsigned long begin, expire; 1448 struct gss_cred *gss_cred; 1449 1450 gss_cred = container_of(cred, struct gss_cred, gc_base); 1451 begin = gss_cred->gc_upcall_timestamp; 1452 expire = begin + gss_expired_cred_retry_delay * HZ; 1453 1454 if (time_in_range_open(now, begin, expire)) 1455 return 1; 1456 } 1457 return 0; 1458 } 1459 1460 /* 1461 * Refresh credentials. 
XXX - finish 1462 */ 1463 static int 1464 gss_refresh(struct rpc_task *task) 1465 { 1466 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1467 int ret = 0; 1468 1469 if (gss_cred_is_negative_entry(cred)) 1470 return -EKEYEXPIRED; 1471 1472 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && 1473 !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { 1474 ret = gss_renew_cred(task); 1475 if (ret < 0) 1476 goto out; 1477 cred = task->tk_rqstp->rq_cred; 1478 } 1479 1480 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) 1481 ret = gss_refresh_upcall(task); 1482 out: 1483 return ret; 1484 } 1485 1486 /* Dummy refresh routine: used only when destroying the context */ 1487 static int 1488 gss_refresh_null(struct rpc_task *task) 1489 { 1490 return -EACCES; 1491 } 1492 1493 static __be32 * 1494 gss_validate(struct rpc_task *task, __be32 *p) 1495 { 1496 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1497 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1498 __be32 seq; 1499 struct kvec iov; 1500 struct xdr_buf verf_buf; 1501 struct xdr_netobj mic; 1502 u32 flav,len; 1503 u32 maj_stat; 1504 __be32 *ret = ERR_PTR(-EIO); 1505 1506 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1507 1508 flav = ntohl(*p++); 1509 if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) 1510 goto out_bad; 1511 if (flav != RPC_AUTH_GSS) 1512 goto out_bad; 1513 seq = htonl(task->tk_rqstp->rq_seqno); 1514 iov.iov_base = &seq; 1515 iov.iov_len = sizeof(seq); 1516 xdr_buf_from_iov(&iov, &verf_buf); 1517 mic.data = (u8 *)p; 1518 mic.len = len; 1519 1520 ret = ERR_PTR(-EACCES); 1521 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1522 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1523 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1524 if (maj_stat) { 1525 dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n", 1526 task->tk_pid, __func__, maj_stat); 1527 goto out_bad; 1528 } 1529 /* We leave it to unwrap to calculate au_rslack. 
For now we just 1530 * calculate the length of the verifier: */ 1531 cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; 1532 gss_put_ctx(ctx); 1533 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", 1534 task->tk_pid, __func__); 1535 return p + XDR_QUADLEN(len); 1536 out_bad: 1537 gss_put_ctx(ctx); 1538 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, 1539 PTR_ERR(ret)); 1540 return ret; 1541 } 1542 1543 static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, 1544 __be32 *p, void *obj) 1545 { 1546 struct xdr_stream xdr; 1547 1548 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p); 1549 encode(rqstp, &xdr, obj); 1550 } 1551 1552 static inline int 1553 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1554 kxdreproc_t encode, struct rpc_rqst *rqstp, 1555 __be32 *p, void *obj) 1556 { 1557 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1558 struct xdr_buf integ_buf; 1559 __be32 *integ_len = NULL; 1560 struct xdr_netobj mic; 1561 u32 offset; 1562 __be32 *q; 1563 struct kvec *iov; 1564 u32 maj_stat = 0; 1565 int status = -EIO; 1566 1567 integ_len = p++; 1568 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1569 *p++ = htonl(rqstp->rq_seqno); 1570 1571 gss_wrap_req_encode(encode, rqstp, p, obj); 1572 1573 if (xdr_buf_subsegment(snd_buf, &integ_buf, 1574 offset, snd_buf->len - offset)) 1575 return status; 1576 *integ_len = htonl(integ_buf.len); 1577 1578 /* guess whether we're in the head or the tail: */ 1579 if (snd_buf->page_len || snd_buf->tail[0].iov_len) 1580 iov = snd_buf->tail; 1581 else 1582 iov = snd_buf->head; 1583 p = iov->iov_base + iov->iov_len; 1584 mic.data = (u8 *)(p + 1); 1585 1586 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1587 status = -EIO; /* XXX? */ 1588 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1589 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1590 else if (maj_stat) 1591 return status; 1592 q = xdr_encode_opaque(p, NULL, mic.len); 1593 1594 offset = (u8 *)q - (u8 *)p; 1595 iov->iov_len += offset; 1596 snd_buf->len += offset; 1597 return 0; 1598 } 1599 1600 static void 1601 priv_release_snd_buf(struct rpc_rqst *rqstp) 1602 { 1603 int i; 1604 1605 for (i=0; i < rqstp->rq_enc_pages_num; i++) 1606 __free_page(rqstp->rq_enc_pages[i]); 1607 kfree(rqstp->rq_enc_pages); 1608 } 1609 1610 static int 1611 alloc_enc_pages(struct rpc_rqst *rqstp) 1612 { 1613 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1614 int first, last, i; 1615 1616 if (snd_buf->page_len == 0) { 1617 rqstp->rq_enc_pages_num = 0; 1618 return 0; 1619 } 1620 1621 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1622 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; 1623 rqstp->rq_enc_pages_num = last - first + 1 + 1; 1624 rqstp->rq_enc_pages 1625 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), 1626 GFP_NOFS); 1627 if (!rqstp->rq_enc_pages) 1628 goto out; 1629 for (i=0; i < rqstp->rq_enc_pages_num; i++) { 1630 rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); 1631 if (rqstp->rq_enc_pages[i] == NULL) 1632 goto out_free; 1633 } 1634 rqstp->rq_release_snd_buf = priv_release_snd_buf; 1635 return 0; 1636 out_free: 1637 rqstp->rq_enc_pages_num = i; 1638 priv_release_snd_buf(rqstp); 1639 out: 1640 return -EAGAIN; 1641 } 1642 1643 static inline int 1644 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1645 kxdreproc_t encode, struct rpc_rqst *rqstp, 1646 __be32 *p, void *obj) 1647 { 1648 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1649 u32 offset; 1650 u32 maj_stat; 1651 int status; 1652 __be32 *opaque_len; 1653 
struct page **inpages; 1654 int first; 1655 int pad; 1656 struct kvec *iov; 1657 char *tmp; 1658 1659 opaque_len = p++; 1660 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1661 *p++ = htonl(rqstp->rq_seqno); 1662 1663 gss_wrap_req_encode(encode, rqstp, p, obj); 1664 1665 status = alloc_enc_pages(rqstp); 1666 if (status) 1667 return status; 1668 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1669 inpages = snd_buf->pages + first; 1670 snd_buf->pages = rqstp->rq_enc_pages; 1671 snd_buf->page_base -= first << PAGE_CACHE_SHIFT; 1672 /* 1673 * Give the tail its own page, in case we need extra space in the 1674 * head when wrapping: 1675 * 1676 * call_allocate() allocates twice the slack space required 1677 * by the authentication flavor to rq_callsize. 1678 * For GSS, slack is GSS_CRED_SLACK. 1679 */ 1680 if (snd_buf->page_len || snd_buf->tail[0].iov_len) { 1681 tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); 1682 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); 1683 snd_buf->tail[0].iov_base = tmp; 1684 } 1685 maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); 1686 /* slack space should prevent this ever happening: */ 1687 BUG_ON(snd_buf->len > snd_buf->buflen); 1688 status = -EIO; 1689 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was 1690 * done anyway, so it's safe to put the request on the wire: */ 1691 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1692 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1693 else if (maj_stat) 1694 return status; 1695 1696 *opaque_len = htonl(snd_buf->len - offset); 1697 /* guess whether we're in the head or the tail: */ 1698 if (snd_buf->page_len || snd_buf->tail[0].iov_len) 1699 iov = snd_buf->tail; 1700 else 1701 iov = snd_buf->head; 1702 p = iov->iov_base + iov->iov_len; 1703 pad = 3 - ((snd_buf->len - offset - 1) & 3); 1704 memset(p, 0, pad); 1705 iov->iov_len += pad; 1706 snd_buf->len += pad; 1707 1708 return 0; 1709 } 1710 1711 static int 1712 gss_wrap_req(struct rpc_task *task, 1713 kxdreproc_t encode, void *rqstp, __be32 *p, void *obj) 1714 { 1715 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1716 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1717 gc_base); 1718 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1719 int status = -EIO; 1720 1721 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1722 if (ctx->gc_proc != RPC_GSS_PROC_DATA) { 1723 /* The spec seems a little ambiguous here, but I think that not 1724 * wrapping context destruction requests makes the most sense. 
1725 */ 1726 gss_wrap_req_encode(encode, rqstp, p, obj); 1727 status = 0; 1728 goto out; 1729 } 1730 switch (gss_cred->gc_service) { 1731 case RPC_GSS_SVC_NONE: 1732 gss_wrap_req_encode(encode, rqstp, p, obj); 1733 status = 0; 1734 break; 1735 case RPC_GSS_SVC_INTEGRITY: 1736 status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj); 1737 break; 1738 case RPC_GSS_SVC_PRIVACY: 1739 status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj); 1740 break; 1741 } 1742 out: 1743 gss_put_ctx(ctx); 1744 dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status); 1745 return status; 1746 } 1747 1748 static inline int 1749 gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1750 struct rpc_rqst *rqstp, __be32 **p) 1751 { 1752 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1753 struct xdr_buf integ_buf; 1754 struct xdr_netobj mic; 1755 u32 data_offset, mic_offset; 1756 u32 integ_len; 1757 u32 maj_stat; 1758 int status = -EIO; 1759 1760 integ_len = ntohl(*(*p)++); 1761 if (integ_len & 3) 1762 return status; 1763 data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; 1764 mic_offset = integ_len + data_offset; 1765 if (mic_offset > rcv_buf->len) 1766 return status; 1767 if (ntohl(*(*p)++) != rqstp->rq_seqno) 1768 return status; 1769 1770 if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, 1771 mic_offset - data_offset)) 1772 return status; 1773 1774 if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) 1775 return status; 1776 1777 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1778 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1779 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1780 if (maj_stat != GSS_S_COMPLETE) 1781 return status; 1782 return 0; 1783 } 1784 1785 static inline int 1786 gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1787 struct rpc_rqst *rqstp, __be32 **p) 1788 { 1789 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1790 u32 offset; 1791 u32 opaque_len; 1792 u32 maj_stat; 1793 int status = -EIO; 1794 1795 opaque_len = ntohl(*(*p)++); 1796 offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; 1797 if (offset + opaque_len > rcv_buf->len) 1798 return status; 1799 /* remove padding: */ 1800 rcv_buf->len = offset + opaque_len; 1801 1802 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); 1803 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1804 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1805 if (maj_stat != GSS_S_COMPLETE) 1806 return status; 1807 if (ntohl(*(*p)++) != rqstp->rq_seqno) 1808 return status; 1809 1810 return 0; 1811 } 1812 1813 static int 1814 gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, 1815 __be32 *p, void *obj) 1816 { 1817 struct xdr_stream xdr; 1818 1819 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 1820 return decode(rqstp, &xdr, obj); 1821 } 1822 1823 static int 1824 gss_unwrap_resp(struct rpc_task *task, 1825 kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj) 1826 { 1827 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1828 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1829 gc_base); 1830 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1831 __be32 *savedp = p; 1832 struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; 1833 int savedlen = head->iov_len; 1834 int status = -EIO; 1835 1836 if (ctx->gc_proc != RPC_GSS_PROC_DATA) 1837 goto out_decode; 1838 switch (gss_cred->gc_service) { 1839 case RPC_GSS_SVC_NONE: 1840 break; 1841 case RPC_GSS_SVC_INTEGRITY: 1842 status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p); 1843 if (status) 1844 goto 
out; 1845 break; 1846 case RPC_GSS_SVC_PRIVACY: 1847 status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); 1848 if (status) 1849 goto out; 1850 break; 1851 } 1852 /* take into account extra slack for integrity and privacy cases: */ 1853 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1854 + (savedlen - head->iov_len); 1855 out_decode: 1856 status = gss_unwrap_req_decode(decode, rqstp, p, obj); 1857 out: 1858 gss_put_ctx(ctx); 1859 dprintk("RPC: %5u %s returning %d\n", 1860 task->tk_pid, __func__, status); 1861 return status; 1862 } 1863 1864 static const struct rpc_authops authgss_ops = { 1865 .owner = THIS_MODULE, 1866 .au_flavor = RPC_AUTH_GSS, 1867 .au_name = "RPCSEC_GSS", 1868 .create = gss_create, 1869 .destroy = gss_destroy, 1870 .lookup_cred = gss_lookup_cred, 1871 .crcreate = gss_create_cred, 1872 .list_pseudoflavors = gss_mech_list_pseudoflavors, 1873 .info2flavor = gss_mech_info2flavor, 1874 .flavor2info = gss_mech_flavor2info, 1875 }; 1876 1877 static const struct rpc_credops gss_credops = { 1878 .cr_name = "AUTH_GSS", 1879 .crdestroy = gss_destroy_cred, 1880 .cr_init = gss_cred_init, 1881 .crbind = rpcauth_generic_bind_cred, 1882 .crmatch = gss_match, 1883 .crmarshal = gss_marshal, 1884 .crrefresh = gss_refresh, 1885 .crvalidate = gss_validate, 1886 .crwrap_req = gss_wrap_req, 1887 .crunwrap_resp = gss_unwrap_resp, 1888 .crkey_timeout = gss_key_timeout, 1889 }; 1890 1891 static const struct rpc_credops gss_nullops = { 1892 .cr_name = "AUTH_GSS", 1893 .crdestroy = gss_destroy_nullcred, 1894 .crbind = rpcauth_generic_bind_cred, 1895 .crmatch = gss_match, 1896 .crmarshal = gss_marshal, 1897 .crrefresh = gss_refresh_null, 1898 .crvalidate = gss_validate, 1899 .crwrap_req = gss_wrap_req, 1900 .crunwrap_resp = gss_unwrap_resp, 1901 }; 1902 1903 static const struct rpc_pipe_ops gss_upcall_ops_v0 = { 1904 .upcall = rpc_pipe_generic_upcall, 1905 .downcall = gss_pipe_downcall, 1906 .destroy_msg = gss_pipe_destroy_msg, 1907 .open_pipe = gss_pipe_open_v0, 1908 .release_pipe = gss_pipe_release, 1909 }; 1910 1911 static const struct rpc_pipe_ops gss_upcall_ops_v1 = { 1912 .upcall = rpc_pipe_generic_upcall, 1913 .downcall = gss_pipe_downcall, 1914 .destroy_msg = gss_pipe_destroy_msg, 1915 .open_pipe = gss_pipe_open_v1, 1916 .release_pipe = gss_pipe_release, 1917 }; 1918 1919 static __net_init int rpcsec_gss_init_net(struct net *net) 1920 { 1921 return gss_svc_init_net(net); 1922 } 1923 1924 static __net_exit void rpcsec_gss_exit_net(struct net *net) 1925 { 1926 gss_svc_shutdown_net(net); 1927 } 1928 1929 static struct pernet_operations rpcsec_gss_net_ops = { 1930 .init = rpcsec_gss_init_net, 1931 .exit = rpcsec_gss_exit_net, 1932 }; 1933 1934 /* 1935 * Initialize RPCSEC_GSS module 1936 */ 1937 static int __init init_rpcsec_gss(void) 1938 { 1939 int err = 0; 1940 1941 err = rpcauth_register(&authgss_ops); 1942 if (err) 1943 goto out; 1944 err = gss_svc_init(); 1945 if (err) 1946 goto out_unregister; 1947 err = register_pernet_subsys(&rpcsec_gss_net_ops); 1948 if (err) 1949 goto out_svc_exit; 1950 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version"); 1951 return 0; 1952 out_svc_exit: 1953 gss_svc_shutdown(); 1954 out_unregister: 1955 rpcauth_unregister(&authgss_ops); 1956 out: 1957 return err; 1958 } 1959 1960 static void __exit exit_rpcsec_gss(void) 1961 { 1962 unregister_pernet_subsys(&rpcsec_gss_net_ops); 1963 gss_svc_shutdown(); 1964 rpcauth_unregister(&authgss_ops); 1965 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1966 } 1967 1968 
MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime during which the NFS layer cleans "
		"up prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
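
/*
 * Both parameters can also be adjusted at runtime; assuming the module is
 * built under its usual name (auth_rpcgss), they are exposed as
 * /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay and
 * /sys/module/auth_rpcgss/parameters/key_expire_timeo.
 */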