/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song       <dugsong@monkey.org>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named
	 * after the mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
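	 *
	 * (Sketch of the rest of the gssd downcall buffer, summarizing only
	 *  what the parsing below expects after gss_pipe_downcall has already
	 *  consumed the leading uid:
	 *    u32    lifetime  - context lifetime in seconds (0 -> GSSD_MIN_TIMEOUT)
	 *    u32    window    - sequence window; 0 means an errno follows instead
	 *    netobj wire ctx  - u32 length + opaque context handle used on the wire
	 *    u32    seclen    - followed by seclen bytes of mechanism-specific
	 *                       exported security context.)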
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
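 * (The queued rpc_pipe_msg carries either the raw uid, for the legacy v0
 *  pipe, or, for the v1 "gssd" pipe, a text line built from the "mech=",
 *  "uid=", "target=", "service=" and "enctypes=" fields assembled in
 *  gss_encode_v1_msg below - e.g. "mech=krb5 uid=1000 " with the optional
 *  fields appended when present; the example values are illustrative only.)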
342 * If an upcall owned by our uid already exists, then we return a reference 343 * to that upcall instead of adding the new upcall. 344 */ 345 static inline struct gss_upcall_msg * 346 gss_add_msg(struct gss_upcall_msg *gss_msg) 347 { 348 struct rpc_pipe *pipe = gss_msg->pipe; 349 struct gss_upcall_msg *old; 350 351 spin_lock(&pipe->lock); 352 old = __gss_find_upcall(pipe, gss_msg->uid); 353 if (old == NULL) { 354 atomic_inc(&gss_msg->count); 355 list_add(&gss_msg->list, &pipe->in_downcall); 356 } else 357 gss_msg = old; 358 spin_unlock(&pipe->lock); 359 return gss_msg; 360 } 361 362 static void 363 __gss_unhash_msg(struct gss_upcall_msg *gss_msg) 364 { 365 list_del_init(&gss_msg->list); 366 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 367 wake_up_all(&gss_msg->waitqueue); 368 atomic_dec(&gss_msg->count); 369 } 370 371 static void 372 gss_unhash_msg(struct gss_upcall_msg *gss_msg) 373 { 374 struct rpc_pipe *pipe = gss_msg->pipe; 375 376 if (list_empty(&gss_msg->list)) 377 return; 378 spin_lock(&pipe->lock); 379 if (!list_empty(&gss_msg->list)) 380 __gss_unhash_msg(gss_msg); 381 spin_unlock(&pipe->lock); 382 } 383 384 static void 385 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg) 386 { 387 switch (gss_msg->msg.errno) { 388 case 0: 389 if (gss_msg->ctx == NULL) 390 break; 391 clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); 392 gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx); 393 break; 394 case -EKEYEXPIRED: 395 set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); 396 } 397 gss_cred->gc_upcall_timestamp = jiffies; 398 gss_cred->gc_upcall = NULL; 399 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 400 } 401 402 static void 403 gss_upcall_callback(struct rpc_task *task) 404 { 405 struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred, 406 struct gss_cred, gc_base); 407 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; 408 struct rpc_pipe *pipe = gss_msg->pipe; 409 410 spin_lock(&pipe->lock); 411 gss_handle_downcall_result(gss_cred, gss_msg); 412 spin_unlock(&pipe->lock); 413 task->tk_status = gss_msg->msg.errno; 414 gss_release_msg(gss_msg); 415 } 416 417 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) 418 { 419 uid_t uid = from_kuid(&init_user_ns, gss_msg->uid); 420 memcpy(gss_msg->databuf, &uid, sizeof(uid)); 421 gss_msg->msg.data = gss_msg->databuf; 422 gss_msg->msg.len = sizeof(uid); 423 424 BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf)); 425 } 426 427 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, 428 const char *service_name, 429 const char *target_name) 430 { 431 struct gss_api_mech *mech = gss_msg->auth->mech; 432 char *p = gss_msg->databuf; 433 size_t buflen = sizeof(gss_msg->databuf); 434 int len; 435 436 len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name, 437 from_kuid(&init_user_ns, gss_msg->uid)); 438 buflen -= len; 439 p += len; 440 gss_msg->msg.len = len; 441 if (target_name) { 442 len = scnprintf(p, buflen, "target=%s ", target_name); 443 buflen -= len; 444 p += len; 445 gss_msg->msg.len += len; 446 } 447 if (service_name != NULL) { 448 len = scnprintf(p, buflen, "service=%s ", service_name); 449 buflen -= len; 450 p += len; 451 gss_msg->msg.len += len; 452 } 453 if (mech->gm_upcall_enctypes) { 454 len = scnprintf(p, buflen, "enctypes=%s ", 455 mech->gm_upcall_enctypes); 456 buflen -= len; 457 p += len; 458 gss_msg->msg.len += len; 459 } 460 len = scnprintf(p, buflen, "\n"); 461 if (len == 0) 462 goto out_overflow; 463 
gss_msg->msg.len += len; 464 465 gss_msg->msg.data = gss_msg->databuf; 466 return 0; 467 out_overflow: 468 WARN_ON_ONCE(1); 469 return -ENOMEM; 470 } 471 472 static struct gss_upcall_msg * 473 gss_alloc_msg(struct gss_auth *gss_auth, 474 kuid_t uid, const char *service_name) 475 { 476 struct gss_upcall_msg *gss_msg; 477 int vers; 478 int err = -ENOMEM; 479 480 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); 481 if (gss_msg == NULL) 482 goto err; 483 vers = get_pipe_version(gss_auth->net); 484 err = vers; 485 if (err < 0) 486 goto err_free_msg; 487 gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe; 488 INIT_LIST_HEAD(&gss_msg->list); 489 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 490 init_waitqueue_head(&gss_msg->waitqueue); 491 atomic_set(&gss_msg->count, 1); 492 gss_msg->uid = uid; 493 gss_msg->auth = gss_auth; 494 switch (vers) { 495 case 0: 496 gss_encode_v0_msg(gss_msg); 497 break; 498 default: 499 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); 500 if (err) 501 goto err_free_msg; 502 }; 503 return gss_msg; 504 err_free_msg: 505 kfree(gss_msg); 506 err: 507 return ERR_PTR(err); 508 } 509 510 static struct gss_upcall_msg * 511 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred) 512 { 513 struct gss_cred *gss_cred = container_of(cred, 514 struct gss_cred, gc_base); 515 struct gss_upcall_msg *gss_new, *gss_msg; 516 kuid_t uid = cred->cr_uid; 517 518 gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal); 519 if (IS_ERR(gss_new)) 520 return gss_new; 521 gss_msg = gss_add_msg(gss_new); 522 if (gss_msg == gss_new) { 523 int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); 524 if (res) { 525 gss_unhash_msg(gss_new); 526 gss_msg = ERR_PTR(res); 527 } 528 } else 529 gss_release_msg(gss_new); 530 return gss_msg; 531 } 532 533 static void warn_gssd(void) 534 { 535 static unsigned long ratelimit; 536 unsigned long now = jiffies; 537 538 if (time_after(now, ratelimit)) { 539 printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" 540 "Please check user daemon is running.\n"); 541 ratelimit = now + 15*HZ; 542 } 543 } 544 545 static inline int 546 gss_refresh_upcall(struct rpc_task *task) 547 { 548 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 549 struct gss_auth *gss_auth = container_of(cred->cr_auth, 550 struct gss_auth, rpc_auth); 551 struct gss_cred *gss_cred = container_of(cred, 552 struct gss_cred, gc_base); 553 struct gss_upcall_msg *gss_msg; 554 struct rpc_pipe *pipe; 555 int err = 0; 556 557 dprintk("RPC: %5u %s for uid %u\n", 558 task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid)); 559 gss_msg = gss_setup_upcall(gss_auth, cred); 560 if (PTR_ERR(gss_msg) == -EAGAIN) { 561 /* XXX: warning on the first, under the assumption we 562 * shouldn't normally hit this case on a refresh. 
*/ 563 warn_gssd(); 564 task->tk_timeout = 15*HZ; 565 rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); 566 return -EAGAIN; 567 } 568 if (IS_ERR(gss_msg)) { 569 err = PTR_ERR(gss_msg); 570 goto out; 571 } 572 pipe = gss_msg->pipe; 573 spin_lock(&pipe->lock); 574 if (gss_cred->gc_upcall != NULL) 575 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); 576 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { 577 task->tk_timeout = 0; 578 gss_cred->gc_upcall = gss_msg; 579 /* gss_upcall_callback will release the reference to gss_upcall_msg */ 580 atomic_inc(&gss_msg->count); 581 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); 582 } else { 583 gss_handle_downcall_result(gss_cred, gss_msg); 584 err = gss_msg->msg.errno; 585 } 586 spin_unlock(&pipe->lock); 587 gss_release_msg(gss_msg); 588 out: 589 dprintk("RPC: %5u %s for uid %u result %d\n", 590 task->tk_pid, __func__, 591 from_kuid(&init_user_ns, cred->cr_uid), err); 592 return err; 593 } 594 595 static inline int 596 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 597 { 598 struct net *net = gss_auth->net; 599 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 600 struct rpc_pipe *pipe; 601 struct rpc_cred *cred = &gss_cred->gc_base; 602 struct gss_upcall_msg *gss_msg; 603 unsigned long timeout; 604 DEFINE_WAIT(wait); 605 int err; 606 607 dprintk("RPC: %s for uid %u\n", 608 __func__, from_kuid(&init_user_ns, cred->cr_uid)); 609 retry: 610 err = 0; 611 /* Default timeout is 15s unless we know that gssd is not running */ 612 timeout = 15 * HZ; 613 if (!sn->gssd_running) 614 timeout = HZ >> 2; 615 gss_msg = gss_setup_upcall(gss_auth, cred); 616 if (PTR_ERR(gss_msg) == -EAGAIN) { 617 err = wait_event_interruptible_timeout(pipe_version_waitqueue, 618 sn->pipe_version >= 0, timeout); 619 if (sn->pipe_version < 0) { 620 if (err == 0) 621 sn->gssd_running = 0; 622 warn_gssd(); 623 err = -EACCES; 624 } 625 if (err < 0) 626 goto out; 627 goto retry; 628 } 629 if (IS_ERR(gss_msg)) { 630 err = PTR_ERR(gss_msg); 631 goto out; 632 } 633 pipe = gss_msg->pipe; 634 for (;;) { 635 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); 636 spin_lock(&pipe->lock); 637 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 638 break; 639 } 640 spin_unlock(&pipe->lock); 641 if (fatal_signal_pending(current)) { 642 err = -ERESTARTSYS; 643 goto out_intr; 644 } 645 schedule(); 646 } 647 if (gss_msg->ctx) 648 gss_cred_set_ctx(cred, gss_msg->ctx); 649 else 650 err = gss_msg->msg.errno; 651 spin_unlock(&pipe->lock); 652 out_intr: 653 finish_wait(&gss_msg->waitqueue, &wait); 654 gss_release_msg(gss_msg); 655 out: 656 dprintk("RPC: %s for uid %u result %d\n", 657 __func__, from_kuid(&init_user_ns, cred->cr_uid), err); 658 return err; 659 } 660 661 #define MSG_BUF_MAXSIZE 1024 662 663 static ssize_t 664 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) 665 { 666 const void *p, *end; 667 void *buf; 668 struct gss_upcall_msg *gss_msg; 669 struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe; 670 struct gss_cl_ctx *ctx; 671 uid_t id; 672 kuid_t uid; 673 ssize_t err = -EFBIG; 674 675 if (mlen > MSG_BUF_MAXSIZE) 676 goto out; 677 err = -ENOMEM; 678 buf = kmalloc(mlen, GFP_NOFS); 679 if (!buf) 680 goto out; 681 682 err = -EFAULT; 683 if (copy_from_user(buf, src, mlen)) 684 goto err; 685 686 end = (const void *)((char *)buf + mlen); 687 p = simple_get_bytes(buf, end, &id, sizeof(id)); 688 if (IS_ERR(p)) { 689 err = PTR_ERR(p); 690 goto err; 691 } 692 693 uid = 
make_kuid(&init_user_ns, id); 694 if (!uid_valid(uid)) { 695 err = -EINVAL; 696 goto err; 697 } 698 699 err = -ENOMEM; 700 ctx = gss_alloc_context(); 701 if (ctx == NULL) 702 goto err; 703 704 err = -ENOENT; 705 /* Find a matching upcall */ 706 spin_lock(&pipe->lock); 707 gss_msg = __gss_find_upcall(pipe, uid); 708 if (gss_msg == NULL) { 709 spin_unlock(&pipe->lock); 710 goto err_put_ctx; 711 } 712 list_del_init(&gss_msg->list); 713 spin_unlock(&pipe->lock); 714 715 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 716 if (IS_ERR(p)) { 717 err = PTR_ERR(p); 718 switch (err) { 719 case -EACCES: 720 case -EKEYEXPIRED: 721 gss_msg->msg.errno = err; 722 err = mlen; 723 break; 724 case -EFAULT: 725 case -ENOMEM: 726 case -EINVAL: 727 case -ENOSYS: 728 gss_msg->msg.errno = -EAGAIN; 729 break; 730 default: 731 printk(KERN_CRIT "%s: bad return from " 732 "gss_fill_context: %zd\n", __func__, err); 733 BUG(); 734 } 735 goto err_release_msg; 736 } 737 gss_msg->ctx = gss_get_ctx(ctx); 738 err = mlen; 739 740 err_release_msg: 741 spin_lock(&pipe->lock); 742 __gss_unhash_msg(gss_msg); 743 spin_unlock(&pipe->lock); 744 gss_release_msg(gss_msg); 745 err_put_ctx: 746 gss_put_ctx(ctx); 747 err: 748 kfree(buf); 749 out: 750 dprintk("RPC: %s returning %Zd\n", __func__, err); 751 return err; 752 } 753 754 static int gss_pipe_open(struct inode *inode, int new_version) 755 { 756 struct net *net = inode->i_sb->s_fs_info; 757 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 758 int ret = 0; 759 760 spin_lock(&pipe_version_lock); 761 if (sn->pipe_version < 0) { 762 /* First open of any gss pipe determines the version: */ 763 sn->pipe_version = new_version; 764 rpc_wake_up(&pipe_version_rpc_waitqueue); 765 wake_up(&pipe_version_waitqueue); 766 } else if (sn->pipe_version != new_version) { 767 /* Trying to open a pipe of a different version */ 768 ret = -EBUSY; 769 goto out; 770 } 771 atomic_inc(&sn->pipe_users); 772 out: 773 spin_unlock(&pipe_version_lock); 774 return ret; 775 776 } 777 778 static int gss_pipe_open_v0(struct inode *inode) 779 { 780 return gss_pipe_open(inode, 0); 781 } 782 783 static int gss_pipe_open_v1(struct inode *inode) 784 { 785 return gss_pipe_open(inode, 1); 786 } 787 788 static void 789 gss_pipe_release(struct inode *inode) 790 { 791 struct net *net = inode->i_sb->s_fs_info; 792 struct rpc_pipe *pipe = RPC_I(inode)->pipe; 793 struct gss_upcall_msg *gss_msg; 794 795 restart: 796 spin_lock(&pipe->lock); 797 list_for_each_entry(gss_msg, &pipe->in_downcall, list) { 798 799 if (!list_empty(&gss_msg->msg.list)) 800 continue; 801 gss_msg->msg.errno = -EPIPE; 802 atomic_inc(&gss_msg->count); 803 __gss_unhash_msg(gss_msg); 804 spin_unlock(&pipe->lock); 805 gss_release_msg(gss_msg); 806 goto restart; 807 } 808 spin_unlock(&pipe->lock); 809 810 put_pipe_version(net); 811 } 812 813 static void 814 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) 815 { 816 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); 817 818 if (msg->errno < 0) { 819 dprintk("RPC: %s releasing msg %p\n", 820 __func__, gss_msg); 821 atomic_inc(&gss_msg->count); 822 gss_unhash_msg(gss_msg); 823 if (msg->errno == -ETIMEDOUT) 824 warn_gssd(); 825 gss_release_msg(gss_msg); 826 } 827 } 828 829 static void gss_pipe_dentry_destroy(struct dentry *dir, 830 struct rpc_pipe_dir_object *pdo) 831 { 832 struct gss_pipe *gss_pipe = pdo->pdo_data; 833 struct rpc_pipe *pipe = gss_pipe->pipe; 834 835 if (pipe->dentry != NULL) { 836 rpc_unlink(pipe->dentry); 837 pipe->dentry = NULL; 838 } 839 } 840 841 static 
int gss_pipe_dentry_create(struct dentry *dir, 842 struct rpc_pipe_dir_object *pdo) 843 { 844 struct gss_pipe *p = pdo->pdo_data; 845 struct dentry *dentry; 846 847 dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe); 848 if (IS_ERR(dentry)) 849 return PTR_ERR(dentry); 850 p->pipe->dentry = dentry; 851 return 0; 852 } 853 854 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = { 855 .create = gss_pipe_dentry_create, 856 .destroy = gss_pipe_dentry_destroy, 857 }; 858 859 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt, 860 const char *name, 861 const struct rpc_pipe_ops *upcall_ops) 862 { 863 struct gss_pipe *p; 864 int err = -ENOMEM; 865 866 p = kmalloc(sizeof(*p), GFP_KERNEL); 867 if (p == NULL) 868 goto err; 869 p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); 870 if (IS_ERR(p->pipe)) { 871 err = PTR_ERR(p->pipe); 872 goto err_free_gss_pipe; 873 } 874 p->name = name; 875 p->clnt = clnt; 876 kref_init(&p->kref); 877 rpc_init_pipe_dir_object(&p->pdo, 878 &gss_pipe_dir_object_ops, 879 p); 880 return p; 881 err_free_gss_pipe: 882 kfree(p); 883 err: 884 return ERR_PTR(err); 885 } 886 887 struct gss_alloc_pdo { 888 struct rpc_clnt *clnt; 889 const char *name; 890 const struct rpc_pipe_ops *upcall_ops; 891 }; 892 893 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data) 894 { 895 struct gss_pipe *gss_pipe; 896 struct gss_alloc_pdo *args = data; 897 898 if (pdo->pdo_ops != &gss_pipe_dir_object_ops) 899 return 0; 900 gss_pipe = container_of(pdo, struct gss_pipe, pdo); 901 if (strcmp(gss_pipe->name, args->name) != 0) 902 return 0; 903 if (!kref_get_unless_zero(&gss_pipe->kref)) 904 return 0; 905 return 1; 906 } 907 908 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data) 909 { 910 struct gss_pipe *gss_pipe; 911 struct gss_alloc_pdo *args = data; 912 913 gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops); 914 if (!IS_ERR(gss_pipe)) 915 return &gss_pipe->pdo; 916 return NULL; 917 } 918 919 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt, 920 const char *name, 921 const struct rpc_pipe_ops *upcall_ops) 922 { 923 struct net *net = rpc_net_ns(clnt); 924 struct rpc_pipe_dir_object *pdo; 925 struct gss_alloc_pdo args = { 926 .clnt = clnt, 927 .name = name, 928 .upcall_ops = upcall_ops, 929 }; 930 931 pdo = rpc_find_or_alloc_pipe_dir_object(net, 932 &clnt->cl_pipedir_objects, 933 gss_pipe_match_pdo, 934 gss_pipe_alloc_pdo, 935 &args); 936 if (pdo != NULL) 937 return container_of(pdo, struct gss_pipe, pdo); 938 return ERR_PTR(-ENOMEM); 939 } 940 941 static void __gss_pipe_free(struct gss_pipe *p) 942 { 943 struct rpc_clnt *clnt = p->clnt; 944 struct net *net = rpc_net_ns(clnt); 945 946 rpc_remove_pipe_dir_object(net, 947 &clnt->cl_pipedir_objects, 948 &p->pdo); 949 rpc_destroy_pipe_data(p->pipe); 950 kfree(p); 951 } 952 953 static void __gss_pipe_release(struct kref *kref) 954 { 955 struct gss_pipe *p = container_of(kref, struct gss_pipe, kref); 956 957 __gss_pipe_free(p); 958 } 959 960 static void gss_pipe_free(struct gss_pipe *p) 961 { 962 if (p != NULL) 963 kref_put(&p->kref, __gss_pipe_release); 964 } 965 966 /* 967 * NOTE: we have the opportunity to use different 968 * parameters based on the input flavor (which must be a pseudoflavor) 969 */ 970 static struct gss_auth * 971 gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 972 { 973 rpc_authflavor_t flavor = args->pseudoflavor; 974 struct gss_auth *gss_auth; 975 struct gss_pipe *gss_pipe; 976 struct rpc_auth * auth; 
977 int err = -ENOMEM; /* XXX? */ 978 979 dprintk("RPC: creating GSS authenticator for client %p\n", clnt); 980 981 if (!try_module_get(THIS_MODULE)) 982 return ERR_PTR(err); 983 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL))) 984 goto out_dec; 985 INIT_HLIST_NODE(&gss_auth->hash); 986 gss_auth->target_name = NULL; 987 if (args->target_name) { 988 gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL); 989 if (gss_auth->target_name == NULL) 990 goto err_free; 991 } 992 gss_auth->client = clnt; 993 gss_auth->net = get_net(rpc_net_ns(clnt)); 994 err = -EINVAL; 995 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); 996 if (!gss_auth->mech) { 997 dprintk("RPC: Pseudoflavor %d not found!\n", flavor); 998 goto err_put_net; 999 } 1000 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 1001 if (gss_auth->service == 0) 1002 goto err_put_mech; 1003 auth = &gss_auth->rpc_auth; 1004 auth->au_cslack = GSS_CRED_SLACK >> 2; 1005 auth->au_rslack = GSS_VERF_SLACK >> 2; 1006 auth->au_ops = &authgss_ops; 1007 auth->au_flavor = flavor; 1008 atomic_set(&auth->au_count, 1); 1009 kref_init(&gss_auth->kref); 1010 1011 err = rpcauth_init_credcache(auth); 1012 if (err) 1013 goto err_put_mech; 1014 /* 1015 * Note: if we created the old pipe first, then someone who 1016 * examined the directory at the right moment might conclude 1017 * that we supported only the old pipe. So we instead create 1018 * the new pipe first. 1019 */ 1020 gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1); 1021 if (IS_ERR(gss_pipe)) { 1022 err = PTR_ERR(gss_pipe); 1023 goto err_destroy_credcache; 1024 } 1025 gss_auth->gss_pipe[1] = gss_pipe; 1026 1027 gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name, 1028 &gss_upcall_ops_v0); 1029 if (IS_ERR(gss_pipe)) { 1030 err = PTR_ERR(gss_pipe); 1031 goto err_destroy_pipe_1; 1032 } 1033 gss_auth->gss_pipe[0] = gss_pipe; 1034 1035 return gss_auth; 1036 err_destroy_pipe_1: 1037 gss_pipe_free(gss_auth->gss_pipe[1]); 1038 err_destroy_credcache: 1039 rpcauth_destroy_credcache(auth); 1040 err_put_mech: 1041 gss_mech_put(gss_auth->mech); 1042 err_put_net: 1043 put_net(gss_auth->net); 1044 err_free: 1045 kfree(gss_auth->target_name); 1046 kfree(gss_auth); 1047 out_dec: 1048 module_put(THIS_MODULE); 1049 return ERR_PTR(err); 1050 } 1051 1052 static void 1053 gss_free(struct gss_auth *gss_auth) 1054 { 1055 gss_pipe_free(gss_auth->gss_pipe[0]); 1056 gss_pipe_free(gss_auth->gss_pipe[1]); 1057 gss_mech_put(gss_auth->mech); 1058 put_net(gss_auth->net); 1059 kfree(gss_auth->target_name); 1060 1061 kfree(gss_auth); 1062 module_put(THIS_MODULE); 1063 } 1064 1065 static void 1066 gss_free_callback(struct kref *kref) 1067 { 1068 struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); 1069 1070 gss_free(gss_auth); 1071 } 1072 1073 static void 1074 gss_destroy(struct rpc_auth *auth) 1075 { 1076 struct gss_auth *gss_auth = container_of(auth, 1077 struct gss_auth, rpc_auth); 1078 1079 dprintk("RPC: destroying GSS authenticator %p flavor %d\n", 1080 auth, auth->au_flavor); 1081 1082 if (hash_hashed(&gss_auth->hash)) { 1083 spin_lock(&gss_auth_hash_lock); 1084 hash_del(&gss_auth->hash); 1085 spin_unlock(&gss_auth_hash_lock); 1086 } 1087 1088 gss_pipe_free(gss_auth->gss_pipe[0]); 1089 gss_auth->gss_pipe[0] = NULL; 1090 gss_pipe_free(gss_auth->gss_pipe[1]); 1091 gss_auth->gss_pipe[1] = NULL; 1092 rpcauth_destroy_credcache(auth); 1093 1094 kref_put(&gss_auth->kref, gss_free_callback); 1095 } 1096 1097 /* 1098 * Auths may be shared between rpc clients that were 
cloned from a 1099 * common client with the same xprt, if they also share the flavor and 1100 * target_name. 1101 * 1102 * The auth is looked up from the oldest parent sharing the same 1103 * cl_xprt, and the auth itself references only that common parent 1104 * (which is guaranteed to last as long as any of its descendants). 1105 */ 1106 static struct gss_auth * 1107 gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args, 1108 struct rpc_clnt *clnt, 1109 struct gss_auth *new) 1110 { 1111 struct gss_auth *gss_auth; 1112 unsigned long hashval = (unsigned long)clnt; 1113 1114 spin_lock(&gss_auth_hash_lock); 1115 hash_for_each_possible(gss_auth_hash_table, 1116 gss_auth, 1117 hash, 1118 hashval) { 1119 if (gss_auth->client != clnt) 1120 continue; 1121 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor) 1122 continue; 1123 if (gss_auth->target_name != args->target_name) { 1124 if (gss_auth->target_name == NULL) 1125 continue; 1126 if (args->target_name == NULL) 1127 continue; 1128 if (strcmp(gss_auth->target_name, args->target_name)) 1129 continue; 1130 } 1131 if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count)) 1132 continue; 1133 goto out; 1134 } 1135 if (new) 1136 hash_add(gss_auth_hash_table, &new->hash, hashval); 1137 gss_auth = new; 1138 out: 1139 spin_unlock(&gss_auth_hash_lock); 1140 return gss_auth; 1141 } 1142 1143 static struct gss_auth * 1144 gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1145 { 1146 struct gss_auth *gss_auth; 1147 struct gss_auth *new; 1148 1149 gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL); 1150 if (gss_auth != NULL) 1151 goto out; 1152 new = gss_create_new(args, clnt); 1153 if (IS_ERR(new)) 1154 return new; 1155 gss_auth = gss_auth_find_or_add_hashed(args, clnt, new); 1156 if (gss_auth != new) 1157 gss_destroy(&new->rpc_auth); 1158 out: 1159 return gss_auth; 1160 } 1161 1162 static struct rpc_auth * 1163 gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1164 { 1165 struct gss_auth *gss_auth; 1166 struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt); 1167 1168 while (clnt != clnt->cl_parent) { 1169 struct rpc_clnt *parent = clnt->cl_parent; 1170 /* Find the original parent for this transport */ 1171 if (rcu_access_pointer(parent->cl_xprt) != xprt) 1172 break; 1173 clnt = parent; 1174 } 1175 1176 gss_auth = gss_create_hashed(args, clnt); 1177 if (IS_ERR(gss_auth)) 1178 return ERR_CAST(gss_auth); 1179 return &gss_auth->rpc_auth; 1180 } 1181 1182 /* 1183 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call 1184 * to the server with the GSS control procedure field set to 1185 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release 1186 * all RPCSEC_GSS state associated with that context. 
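 * (The destroy call below is sent with RPC_TASK_ASYNC | RPC_TASK_SOFT and
 *  its result is ignored, so context destruction is strictly best-effort:
 *  the local cred is torn down whether or not the server ever sees the
 *  RPC_GSS_PROC_DESTROY request.)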
1187 */ 1188 static int 1189 gss_destroying_context(struct rpc_cred *cred) 1190 { 1191 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1192 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 1193 struct rpc_task *task; 1194 1195 if (gss_cred->gc_ctx == NULL || 1196 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 1197 return 0; 1198 1199 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; 1200 cred->cr_ops = &gss_nullops; 1201 1202 /* Take a reference to ensure the cred will be destroyed either 1203 * by the RPC call or by the put_rpccred() below */ 1204 get_rpccred(cred); 1205 1206 task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT); 1207 if (!IS_ERR(task)) 1208 rpc_put_task(task); 1209 1210 put_rpccred(cred); 1211 return 1; 1212 } 1213 1214 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure 1215 * to create a new cred or context, so they check that things have been 1216 * allocated before freeing them. */ 1217 static void 1218 gss_do_free_ctx(struct gss_cl_ctx *ctx) 1219 { 1220 dprintk("RPC: %s\n", __func__); 1221 1222 gss_delete_sec_context(&ctx->gc_gss_ctx); 1223 kfree(ctx->gc_wire_ctx.data); 1224 kfree(ctx); 1225 } 1226 1227 static void 1228 gss_free_ctx_callback(struct rcu_head *head) 1229 { 1230 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); 1231 gss_do_free_ctx(ctx); 1232 } 1233 1234 static void 1235 gss_free_ctx(struct gss_cl_ctx *ctx) 1236 { 1237 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); 1238 } 1239 1240 static void 1241 gss_free_cred(struct gss_cred *gss_cred) 1242 { 1243 dprintk("RPC: %s cred=%p\n", __func__, gss_cred); 1244 kfree(gss_cred); 1245 } 1246 1247 static void 1248 gss_free_cred_callback(struct rcu_head *head) 1249 { 1250 struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); 1251 gss_free_cred(gss_cred); 1252 } 1253 1254 static void 1255 gss_destroy_nullcred(struct rpc_cred *cred) 1256 { 1257 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1258 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 1259 struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 1260 1261 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL); 1262 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 1263 if (ctx) 1264 gss_put_ctx(ctx); 1265 kref_put(&gss_auth->kref, gss_free_callback); 1266 } 1267 1268 static void 1269 gss_destroy_cred(struct rpc_cred *cred) 1270 { 1271 1272 if (gss_destroying_context(cred)) 1273 return; 1274 gss_destroy_nullcred(cred); 1275 } 1276 1277 /* 1278 * Lookup RPCSEC_GSS cred for the current process 1279 */ 1280 static struct rpc_cred * 1281 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) 1282 { 1283 return rpcauth_lookup_credcache(auth, acred, flags); 1284 } 1285 1286 static struct rpc_cred * 1287 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) 1288 { 1289 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); 1290 struct gss_cred *cred = NULL; 1291 int err = -ENOMEM; 1292 1293 dprintk("RPC: %s for uid %d, flavor %d\n", 1294 __func__, from_kuid(&init_user_ns, acred->uid), 1295 auth->au_flavor); 1296 1297 if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) 1298 goto out_err; 1299 1300 rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); 1301 /* 1302 * Note: in order to force a call to call_refresh(), we deliberately 1303 * fail to flag the credential as RPCAUTH_CRED_UPTODATE. 
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

/*
 * Returns -EACCES if the GSS context is NULL or will expire within the
 * timeout (seconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	unsigned long now = jiffies;
	unsigned long expire;

	if (gss_cred->gc_ctx == NULL)
		return -EACCES;

	expire = gss_cred->gc_ctx->gc_expiry - (gss_key_expire_timeo * HZ);

	if (time_after(now, expire))
		return -EACCES;
	return 0;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
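 *
 * (For reference, gss_marshal below emits, in XDR words:
 *    RPC_AUTH_GSS | credential length | RPC_GSS_VERSION | gc_proc |
 *    sequence number | gc_service | wire context handle (netobj),
 *  followed by the verifier: RPC_AUTH_GSS | MIC length | MIC computed
 *  over the request from the xid through the end of the credential.)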
1394 */ 1395 static __be32 * 1396 gss_marshal(struct rpc_task *task, __be32 *p) 1397 { 1398 struct rpc_rqst *req = task->tk_rqstp; 1399 struct rpc_cred *cred = req->rq_cred; 1400 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1401 gc_base); 1402 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1403 __be32 *cred_len; 1404 u32 maj_stat = 0; 1405 struct xdr_netobj mic; 1406 struct kvec iov; 1407 struct xdr_buf verf_buf; 1408 1409 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1410 1411 *p++ = htonl(RPC_AUTH_GSS); 1412 cred_len = p++; 1413 1414 spin_lock(&ctx->gc_seq_lock); 1415 req->rq_seqno = ctx->gc_seq++; 1416 spin_unlock(&ctx->gc_seq_lock); 1417 1418 *p++ = htonl((u32) RPC_GSS_VERSION); 1419 *p++ = htonl((u32) ctx->gc_proc); 1420 *p++ = htonl((u32) req->rq_seqno); 1421 *p++ = htonl((u32) gss_cred->gc_service); 1422 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx); 1423 *cred_len = htonl((p - (cred_len + 1)) << 2); 1424 1425 /* We compute the checksum for the verifier over the xdr-encoded bytes 1426 * starting with the xid and ending at the end of the credential: */ 1427 iov.iov_base = xprt_skip_transport_header(req->rq_xprt, 1428 req->rq_snd_buf.head[0].iov_base); 1429 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; 1430 xdr_buf_from_iov(&iov, &verf_buf); 1431 1432 /* set verifier flavor*/ 1433 *p++ = htonl(RPC_AUTH_GSS); 1434 1435 mic.data = (u8 *)(p + 1); 1436 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1437 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 1438 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1439 } else if (maj_stat != 0) { 1440 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1441 goto out_put_ctx; 1442 } 1443 p = xdr_encode_opaque(p, NULL, mic.len); 1444 gss_put_ctx(ctx); 1445 return p; 1446 out_put_ctx: 1447 gss_put_ctx(ctx); 1448 return NULL; 1449 } 1450 1451 static int gss_renew_cred(struct rpc_task *task) 1452 { 1453 struct rpc_cred *oldcred = task->tk_rqstp->rq_cred; 1454 struct gss_cred *gss_cred = container_of(oldcred, 1455 struct gss_cred, 1456 gc_base); 1457 struct rpc_auth *auth = oldcred->cr_auth; 1458 struct auth_cred acred = { 1459 .uid = oldcred->cr_uid, 1460 .principal = gss_cred->gc_principal, 1461 .machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0), 1462 }; 1463 struct rpc_cred *new; 1464 1465 new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); 1466 if (IS_ERR(new)) 1467 return PTR_ERR(new); 1468 task->tk_rqstp->rq_cred = new; 1469 put_rpccred(oldcred); 1470 return 0; 1471 } 1472 1473 static int gss_cred_is_negative_entry(struct rpc_cred *cred) 1474 { 1475 if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { 1476 unsigned long now = jiffies; 1477 unsigned long begin, expire; 1478 struct gss_cred *gss_cred; 1479 1480 gss_cred = container_of(cred, struct gss_cred, gc_base); 1481 begin = gss_cred->gc_upcall_timestamp; 1482 expire = begin + gss_expired_cred_retry_delay * HZ; 1483 1484 if (time_in_range_open(now, begin, expire)) 1485 return 1; 1486 } 1487 return 0; 1488 } 1489 1490 /* 1491 * Refresh credentials. 
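 * (Flow, as implemented below: a cred flagged RPCAUTH_CRED_NEGATIVE and
 *  still within gss_expired_cred_retry_delay fails fast with -EKEYEXPIRED;
 *  a cred that is neither NEW nor UPTODATE is replaced via gss_renew_cred();
 *  a NEW cred triggers gss_refresh_upcall() to ask gssd for a context.)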
XXX - finish 1492 */ 1493 static int 1494 gss_refresh(struct rpc_task *task) 1495 { 1496 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1497 int ret = 0; 1498 1499 if (gss_cred_is_negative_entry(cred)) 1500 return -EKEYEXPIRED; 1501 1502 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && 1503 !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { 1504 ret = gss_renew_cred(task); 1505 if (ret < 0) 1506 goto out; 1507 cred = task->tk_rqstp->rq_cred; 1508 } 1509 1510 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) 1511 ret = gss_refresh_upcall(task); 1512 out: 1513 return ret; 1514 } 1515 1516 /* Dummy refresh routine: used only when destroying the context */ 1517 static int 1518 gss_refresh_null(struct rpc_task *task) 1519 { 1520 return 0; 1521 } 1522 1523 static __be32 * 1524 gss_validate(struct rpc_task *task, __be32 *p) 1525 { 1526 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1527 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1528 __be32 seq; 1529 struct kvec iov; 1530 struct xdr_buf verf_buf; 1531 struct xdr_netobj mic; 1532 u32 flav,len; 1533 u32 maj_stat; 1534 __be32 *ret = ERR_PTR(-EIO); 1535 1536 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1537 1538 flav = ntohl(*p++); 1539 if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) 1540 goto out_bad; 1541 if (flav != RPC_AUTH_GSS) 1542 goto out_bad; 1543 seq = htonl(task->tk_rqstp->rq_seqno); 1544 iov.iov_base = &seq; 1545 iov.iov_len = sizeof(seq); 1546 xdr_buf_from_iov(&iov, &verf_buf); 1547 mic.data = (u8 *)p; 1548 mic.len = len; 1549 1550 ret = ERR_PTR(-EACCES); 1551 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1552 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1553 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1554 if (maj_stat) { 1555 dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n", 1556 task->tk_pid, __func__, maj_stat); 1557 goto out_bad; 1558 } 1559 /* We leave it to unwrap to calculate au_rslack. 
For now we just 1560 * calculate the length of the verifier: */ 1561 cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; 1562 gss_put_ctx(ctx); 1563 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", 1564 task->tk_pid, __func__); 1565 return p + XDR_QUADLEN(len); 1566 out_bad: 1567 gss_put_ctx(ctx); 1568 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, 1569 PTR_ERR(ret)); 1570 return ret; 1571 } 1572 1573 static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, 1574 __be32 *p, void *obj) 1575 { 1576 struct xdr_stream xdr; 1577 1578 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p); 1579 encode(rqstp, &xdr, obj); 1580 } 1581 1582 static inline int 1583 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1584 kxdreproc_t encode, struct rpc_rqst *rqstp, 1585 __be32 *p, void *obj) 1586 { 1587 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1588 struct xdr_buf integ_buf; 1589 __be32 *integ_len = NULL; 1590 struct xdr_netobj mic; 1591 u32 offset; 1592 __be32 *q; 1593 struct kvec *iov; 1594 u32 maj_stat = 0; 1595 int status = -EIO; 1596 1597 integ_len = p++; 1598 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1599 *p++ = htonl(rqstp->rq_seqno); 1600 1601 gss_wrap_req_encode(encode, rqstp, p, obj); 1602 1603 if (xdr_buf_subsegment(snd_buf, &integ_buf, 1604 offset, snd_buf->len - offset)) 1605 return status; 1606 *integ_len = htonl(integ_buf.len); 1607 1608 /* guess whether we're in the head or the tail: */ 1609 if (snd_buf->page_len || snd_buf->tail[0].iov_len) 1610 iov = snd_buf->tail; 1611 else 1612 iov = snd_buf->head; 1613 p = iov->iov_base + iov->iov_len; 1614 mic.data = (u8 *)(p + 1); 1615 1616 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1617 status = -EIO; /* XXX? */ 1618 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1619 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1620 else if (maj_stat) 1621 return status; 1622 q = xdr_encode_opaque(p, NULL, mic.len); 1623 1624 offset = (u8 *)q - (u8 *)p; 1625 iov->iov_len += offset; 1626 snd_buf->len += offset; 1627 return 0; 1628 } 1629 1630 static void 1631 priv_release_snd_buf(struct rpc_rqst *rqstp) 1632 { 1633 int i; 1634 1635 for (i=0; i < rqstp->rq_enc_pages_num; i++) 1636 __free_page(rqstp->rq_enc_pages[i]); 1637 kfree(rqstp->rq_enc_pages); 1638 } 1639 1640 static int 1641 alloc_enc_pages(struct rpc_rqst *rqstp) 1642 { 1643 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1644 int first, last, i; 1645 1646 if (snd_buf->page_len == 0) { 1647 rqstp->rq_enc_pages_num = 0; 1648 return 0; 1649 } 1650 1651 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1652 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; 1653 rqstp->rq_enc_pages_num = last - first + 1 + 1; 1654 rqstp->rq_enc_pages 1655 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), 1656 GFP_NOFS); 1657 if (!rqstp->rq_enc_pages) 1658 goto out; 1659 for (i=0; i < rqstp->rq_enc_pages_num; i++) { 1660 rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); 1661 if (rqstp->rq_enc_pages[i] == NULL) 1662 goto out_free; 1663 } 1664 rqstp->rq_release_snd_buf = priv_release_snd_buf; 1665 return 0; 1666 out_free: 1667 rqstp->rq_enc_pages_num = i; 1668 priv_release_snd_buf(rqstp); 1669 out: 1670 return -EAGAIN; 1671 } 1672 1673 static inline int 1674 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1675 kxdreproc_t encode, struct rpc_rqst *rqstp, 1676 __be32 *p, void *obj) 1677 { 1678 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1679 u32 offset; 1680 u32 maj_stat; 1681 int status; 1682 __be32 *opaque_len; 1683 
struct page **inpages; 1684 int first; 1685 int pad; 1686 struct kvec *iov; 1687 char *tmp; 1688 1689 opaque_len = p++; 1690 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1691 *p++ = htonl(rqstp->rq_seqno); 1692 1693 gss_wrap_req_encode(encode, rqstp, p, obj); 1694 1695 status = alloc_enc_pages(rqstp); 1696 if (status) 1697 return status; 1698 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1699 inpages = snd_buf->pages + first; 1700 snd_buf->pages = rqstp->rq_enc_pages; 1701 snd_buf->page_base -= first << PAGE_CACHE_SHIFT; 1702 /* 1703 * Give the tail its own page, in case we need extra space in the 1704 * head when wrapping: 1705 * 1706 * call_allocate() allocates twice the slack space required 1707 * by the authentication flavor to rq_callsize. 1708 * For GSS, slack is GSS_CRED_SLACK. 1709 */ 1710 if (snd_buf->page_len || snd_buf->tail[0].iov_len) { 1711 tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); 1712 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); 1713 snd_buf->tail[0].iov_base = tmp; 1714 } 1715 maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); 1716 /* slack space should prevent this ever happening: */ 1717 BUG_ON(snd_buf->len > snd_buf->buflen); 1718 status = -EIO; 1719 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was 1720 * done anyway, so it's safe to put the request on the wire: */ 1721 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1722 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1723 else if (maj_stat) 1724 return status; 1725 1726 *opaque_len = htonl(snd_buf->len - offset); 1727 /* guess whether we're in the head or the tail: */ 1728 if (snd_buf->page_len || snd_buf->tail[0].iov_len) 1729 iov = snd_buf->tail; 1730 else 1731 iov = snd_buf->head; 1732 p = iov->iov_base + iov->iov_len; 1733 pad = 3 - ((snd_buf->len - offset - 1) & 3); 1734 memset(p, 0, pad); 1735 iov->iov_len += pad; 1736 snd_buf->len += pad; 1737 1738 return 0; 1739 } 1740 1741 static int 1742 gss_wrap_req(struct rpc_task *task, 1743 kxdreproc_t encode, void *rqstp, __be32 *p, void *obj) 1744 { 1745 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1746 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1747 gc_base); 1748 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1749 int status = -EIO; 1750 1751 dprintk("RPC: %5u %s\n", task->tk_pid, __func__); 1752 if (ctx->gc_proc != RPC_GSS_PROC_DATA) { 1753 /* The spec seems a little ambiguous here, but I think that not 1754 * wrapping context destruction requests makes the most sense. 
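		 * (For normal requests, the switch below then applies the
		 *  negotiated service level: RPC_GSS_SVC_NONE sends the
		 *  arguments as-is, RPC_GSS_SVC_INTEGRITY appends a MIC over
		 *  them, and RPC_GSS_SVC_PRIVACY wraps them with gss_wrap().)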
1755 */ 1756 gss_wrap_req_encode(encode, rqstp, p, obj); 1757 status = 0; 1758 goto out; 1759 } 1760 switch (gss_cred->gc_service) { 1761 case RPC_GSS_SVC_NONE: 1762 gss_wrap_req_encode(encode, rqstp, p, obj); 1763 status = 0; 1764 break; 1765 case RPC_GSS_SVC_INTEGRITY: 1766 status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj); 1767 break; 1768 case RPC_GSS_SVC_PRIVACY: 1769 status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj); 1770 break; 1771 } 1772 out: 1773 gss_put_ctx(ctx); 1774 dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status); 1775 return status; 1776 } 1777 1778 static inline int 1779 gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1780 struct rpc_rqst *rqstp, __be32 **p) 1781 { 1782 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1783 struct xdr_buf integ_buf; 1784 struct xdr_netobj mic; 1785 u32 data_offset, mic_offset; 1786 u32 integ_len; 1787 u32 maj_stat; 1788 int status = -EIO; 1789 1790 integ_len = ntohl(*(*p)++); 1791 if (integ_len & 3) 1792 return status; 1793 data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; 1794 mic_offset = integ_len + data_offset; 1795 if (mic_offset > rcv_buf->len) 1796 return status; 1797 if (ntohl(*(*p)++) != rqstp->rq_seqno) 1798 return status; 1799 1800 if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, 1801 mic_offset - data_offset)) 1802 return status; 1803 1804 if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) 1805 return status; 1806 1807 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1808 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1809 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1810 if (maj_stat != GSS_S_COMPLETE) 1811 return status; 1812 return 0; 1813 } 1814 1815 static inline int 1816 gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1817 struct rpc_rqst *rqstp, __be32 **p) 1818 { 1819 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1820 u32 offset; 1821 u32 opaque_len; 1822 u32 maj_stat; 1823 int status = -EIO; 1824 1825 opaque_len = ntohl(*(*p)++); 1826 offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; 1827 if (offset + opaque_len > rcv_buf->len) 1828 return status; 1829 /* remove padding: */ 1830 rcv_buf->len = offset + opaque_len; 1831 1832 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); 1833 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1834 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1835 if (maj_stat != GSS_S_COMPLETE) 1836 return status; 1837 if (ntohl(*(*p)++) != rqstp->rq_seqno) 1838 return status; 1839 1840 return 0; 1841 } 1842 1843 static int 1844 gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, 1845 __be32 *p, void *obj) 1846 { 1847 struct xdr_stream xdr; 1848 1849 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 1850 return decode(rqstp, &xdr, obj); 1851 } 1852 1853 static int 1854 gss_unwrap_resp(struct rpc_task *task, 1855 kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj) 1856 { 1857 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1858 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1859 gc_base); 1860 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1861 __be32 *savedp = p; 1862 struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; 1863 int savedlen = head->iov_len; 1864 int status = -EIO; 1865 1866 if (ctx->gc_proc != RPC_GSS_PROC_DATA) 1867 goto out_decode; 1868 switch (gss_cred->gc_service) { 1869 case RPC_GSS_SVC_NONE: 1870 break; 1871 case RPC_GSS_SVC_INTEGRITY: 1872 status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p); 1873 if (status) 1874 goto 
out; 1875 break; 1876 case RPC_GSS_SVC_PRIVACY: 1877 status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); 1878 if (status) 1879 goto out; 1880 break; 1881 } 1882 /* take into account extra slack for integrity and privacy cases: */ 1883 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1884 + (savedlen - head->iov_len); 1885 out_decode: 1886 status = gss_unwrap_req_decode(decode, rqstp, p, obj); 1887 out: 1888 gss_put_ctx(ctx); 1889 dprintk("RPC: %5u %s returning %d\n", 1890 task->tk_pid, __func__, status); 1891 return status; 1892 } 1893 1894 static const struct rpc_authops authgss_ops = { 1895 .owner = THIS_MODULE, 1896 .au_flavor = RPC_AUTH_GSS, 1897 .au_name = "RPCSEC_GSS", 1898 .create = gss_create, 1899 .destroy = gss_destroy, 1900 .lookup_cred = gss_lookup_cred, 1901 .crcreate = gss_create_cred, 1902 .list_pseudoflavors = gss_mech_list_pseudoflavors, 1903 .info2flavor = gss_mech_info2flavor, 1904 .flavor2info = gss_mech_flavor2info, 1905 }; 1906 1907 static const struct rpc_credops gss_credops = { 1908 .cr_name = "AUTH_GSS", 1909 .crdestroy = gss_destroy_cred, 1910 .cr_init = gss_cred_init, 1911 .crbind = rpcauth_generic_bind_cred, 1912 .crmatch = gss_match, 1913 .crmarshal = gss_marshal, 1914 .crrefresh = gss_refresh, 1915 .crvalidate = gss_validate, 1916 .crwrap_req = gss_wrap_req, 1917 .crunwrap_resp = gss_unwrap_resp, 1918 .crkey_timeout = gss_key_timeout, 1919 }; 1920 1921 static const struct rpc_credops gss_nullops = { 1922 .cr_name = "AUTH_GSS", 1923 .crdestroy = gss_destroy_nullcred, 1924 .crbind = rpcauth_generic_bind_cred, 1925 .crmatch = gss_match, 1926 .crmarshal = gss_marshal, 1927 .crrefresh = gss_refresh_null, 1928 .crvalidate = gss_validate, 1929 .crwrap_req = gss_wrap_req, 1930 .crunwrap_resp = gss_unwrap_resp, 1931 }; 1932 1933 static const struct rpc_pipe_ops gss_upcall_ops_v0 = { 1934 .upcall = rpc_pipe_generic_upcall, 1935 .downcall = gss_pipe_downcall, 1936 .destroy_msg = gss_pipe_destroy_msg, 1937 .open_pipe = gss_pipe_open_v0, 1938 .release_pipe = gss_pipe_release, 1939 }; 1940 1941 static const struct rpc_pipe_ops gss_upcall_ops_v1 = { 1942 .upcall = rpc_pipe_generic_upcall, 1943 .downcall = gss_pipe_downcall, 1944 .destroy_msg = gss_pipe_destroy_msg, 1945 .open_pipe = gss_pipe_open_v1, 1946 .release_pipe = gss_pipe_release, 1947 }; 1948 1949 static __net_init int rpcsec_gss_init_net(struct net *net) 1950 { 1951 return gss_svc_init_net(net); 1952 } 1953 1954 static __net_exit void rpcsec_gss_exit_net(struct net *net) 1955 { 1956 gss_svc_shutdown_net(net); 1957 } 1958 1959 static struct pernet_operations rpcsec_gss_net_ops = { 1960 .init = rpcsec_gss_init_net, 1961 .exit = rpcsec_gss_exit_net, 1962 }; 1963 1964 /* 1965 * Initialize RPCSEC_GSS module 1966 */ 1967 static int __init init_rpcsec_gss(void) 1968 { 1969 int err = 0; 1970 1971 err = rpcauth_register(&authgss_ops); 1972 if (err) 1973 goto out; 1974 err = gss_svc_init(); 1975 if (err) 1976 goto out_unregister; 1977 err = register_pernet_subsys(&rpcsec_gss_net_ops); 1978 if (err) 1979 goto out_svc_exit; 1980 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version"); 1981 return 0; 1982 out_svc_exit: 1983 gss_svc_shutdown(); 1984 out_unregister: 1985 rpcauth_unregister(&authgss_ops); 1986 out: 1987 return err; 1988 } 1989 1990 static void __exit exit_rpcsec_gss(void) 1991 { 1992 unregister_pernet_subsys(&rpcsec_gss_net_ops); 1993 gss_svc_shutdown(); 1994 rpcauth_unregister(&authgss_ops); 1995 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1996 } 1997 1998 
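/*
 * RPC_AUTH_GSS is RPC auth flavor number 6; the alias below lets the
 * rpcauth core autoload this module by flavor number when a client asks
 * for an RPCSEC_GSS pseudoflavor.
 */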
MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime during which the NFS layer cleans "
		"up prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
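
/*
 * Minimal usage sketch (assuming the usual auth_rpcgss.ko module name):
 * the two knobs above can be set at load time, e.g.
 *     modprobe auth_rpcgss expired_cred_retry_delay=10 key_expire_timeo=300
 * or adjusted afterwards through /sys/module/auth_rpcgss/parameters/.
 */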