/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song       <dugsong@monkey.org>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based
	 * upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
static void gss_put_auth(struct gss_auth *gss_auth);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_atomic();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (ctx)
		gss_get_ctx(ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g.
	 * the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}

	/* is there any trailing data? */
	if (q == end) {
		p = q;
		goto done;
	}

	/* pull in acceptor name (if there is one) */
	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
	if (IS_ERR(p))
		goto err;
done:
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u acceptor %.*s\n",
		__func__, ctx->gc_expiry, now, timeout, ctx->gc_acceptor.len,
		ctx->gc_acceptor.data);
	return p;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	gss_put_auth(gss_msg->auth);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
{
	struct gss_upcall_msg *pos;
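	/* Callers hold pipe->lock while this walks the pipe's in_downcall
	 * list; gss_add_msg() and gss_pipe_downcall() both take it before
	 * calling in here. */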
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		if (auth && pos->auth->service != auth->service)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);

	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
}

static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
			     const char *service_name,
			     const char *target_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	size_t buflen = sizeof(gss_msg->databuf);
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
			from_kuid(&init_user_ns, gss_msg->uid));
	buflen -= len;
	p += len;
	gss_msg->msg.len = len;
	if (target_name) {
		len = scnprintf(p, buflen, "target=%s ", target_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = scnprintf(p, buflen, "service=%s ", service_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = scnprintf(p, buflen, "enctypes=%s ",
				mech->gm_upcall_enctypes);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	len = scnprintf(p, buflen, "\n");
	if (len == 0)
		goto out_overflow;
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	return 0;
out_overflow:
	WARN_ON_ONCE(1);
	return -ENOMEM;
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
	      kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;
	int err = -ENOMEM;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		goto err;
	vers = get_pipe_version(gss_auth->net);
	err = vers;
	if (err < 0)
		goto err_free_msg;
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	switch (vers) {
	case 0:
		gss_encode_v0_msg(gss_msg);
		break;
	default:
		err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
		if (err)
			goto err_put_pipe_version;
	}
	kref_get(&gss_auth->kref);
	return gss_msg;
err_put_pipe_version:
	put_pipe_version(gss_auth->net);
err_free_msg:
	kfree(gss_msg);
err:
	return ERR_PTR(err);
}

static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res;
		atomic_inc(&gss_msg->count);
		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			atomic_dec(&gss_msg->count);
			gss_release_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	err = 0;
	/* if gssd is down, just skip upcalling altogether */
	if (!gssd_running(net)) {
		warn_gssd();
		return -EACCES;
	}
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, 15 * HZ);
		if (sn->pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid, NULL);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			gss_msg->msg.errno = -EIO;
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %zd\n", __func__, err);
	return err;
}

static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;
}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
	gss_release_msg(gss_msg);
}

static void gss_pipe_dentry_destroy(struct dentry *dir,
				    struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *gss_pipe = pdo->pdo_data;
	struct rpc_pipe *pipe = gss_pipe->pipe;

	if (pipe->dentry != NULL) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

static int gss_pipe_dentry_create(struct dentry *dir,
				  struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *p = pdo->pdo_data;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	p->pipe->dentry = dentry;
	return 0;
}

static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create = gss_pipe_dentry_create,
	.destroy = gss_pipe_dentry_destroy,
};

static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
				       const char *name,
				       const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}

struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};

static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
		return 0;
	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
	if (strcmp(gss_pipe->name, args->name) != 0)
		return 0;
	if (!kref_get_unless_zero(&gss_pipe->kref))
		return 0;
	return 1;
}

static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
	if (!IS_ERR(gss_pipe))
		return &gss_pipe->pdo;
	return NULL;
}

static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
				     const char *name,
				     const struct rpc_pipe_ops *upcall_ops)
{
	struct net *net = rpc_net_ns(clnt);
	struct rpc_pipe_dir_object *pdo;
	struct gss_alloc_pdo args = {
		.clnt = clnt,
		.name = name,
		.upcall_ops = upcall_ops,
	};

	pdo = rpc_find_or_alloc_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			gss_pipe_match_pdo,
			gss_pipe_alloc_pdo,
			&args);
	if (pdo != NULL)
		return container_of(pdo, struct gss_pipe, pdo);
	return ERR_PTR(-ENOMEM);
}

static void __gss_pipe_free(struct gss_pipe *p)
{
	struct rpc_clnt *clnt = p->clnt;
	struct net *net = rpc_net_ns(clnt);

	rpc_remove_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			&p->pdo);
	rpc_destroy_pipe_data(p->pipe);
	kfree(p);
}

static void __gss_pipe_release(struct kref *kref)
{
	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);

	__gss_pipe_free(p);
}

static void gss_pipe_free(struct gss_pipe *p)
{
	if (p != NULL)
		kref_put(&p->kref, __gss_pipe_release);
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct gss_auth *
gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	rpc_authflavor_t flavor = args->pseudoflavor;
	struct gss_auth *gss_auth;
	struct gss_pipe *gss_pipe;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	INIT_HLIST_NODE(&gss_auth->hash);
	gss_auth->target_name = NULL;
	if (args->target_name) {
		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
		if (gss_auth->target_name == NULL)
			goto err_free;
	}
	gss_auth->client = clnt;
	gss_auth->net = get_net(rpc_net_ns(clnt));
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_put_net;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	if (!gssd_running(gss_auth->net))
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_flags = 0;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
		auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_put_mech;
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_credcache;
	}
	gss_auth->gss_pipe[1] = gss_pipe;

	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
			&gss_upcall_ops_v0);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_pipe_1;
	}
	gss_auth->gss_pipe[0] = gss_pipe;

	return gss_auth;
err_destroy_pipe_1:
	gss_pipe_free(gss_auth->gss_pipe[1]);
err_destroy_credcache:
	rpcauth_destroy_credcache(auth);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_put_net:
	put_net(gss_auth->net);
err_free:
	kfree(gss_auth->target_name);
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_mech_put(gss_auth->mech);
	put_net(gss_auth->net);
	kfree(gss_auth->target_name);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_put_auth(struct gss_auth *gss_auth)
{
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth = container_of(auth,
			struct gss_auth, rpc_auth);

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
		auth, auth->au_flavor);

	if (hash_hashed(&gss_auth->hash)) {
		spin_lock(&gss_auth_hash_lock);
		hash_del(&gss_auth->hash);
		spin_unlock(&gss_auth_hash_lock);
	}

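	/* Drop the pipes and the cred cache now; the mech, the net namespace
	 * and the gss_auth itself are released in gss_free() once
	 * gss_put_auth() drops the final kref. */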
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_auth->gss_pipe[0] = NULL;
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_auth->gss_pipe[1] = NULL;
	rpcauth_destroy_credcache(auth);

	gss_put_auth(gss_auth);
}

/*
 * Auths may be shared between rpc clients that were cloned from a
 * common client with the same xprt, if they also share the flavor and
 * target_name.
 *
 * The auth is looked up from the oldest parent sharing the same
 * cl_xprt, and the auth itself references only that common parent
 * (which is guaranteed to last as long as any of its descendants).
 */
static struct gss_auth *
gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
			    struct rpc_clnt *clnt,
			    struct gss_auth *new)
{
	struct gss_auth *gss_auth;
	unsigned long hashval = (unsigned long)clnt;

	spin_lock(&gss_auth_hash_lock);
	hash_for_each_possible(gss_auth_hash_table,
			gss_auth,
			hash,
			hashval) {
		if (gss_auth->client != clnt)
			continue;
		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
			continue;
		if (gss_auth->target_name != args->target_name) {
			if (gss_auth->target_name == NULL)
				continue;
			if (args->target_name == NULL)
				continue;
			if (strcmp(gss_auth->target_name, args->target_name))
				continue;
		}
		if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
			continue;
		goto out;
	}
	if (new)
		hash_add(gss_auth_hash_table, &new->hash, hashval);
	gss_auth = new;
out:
	spin_unlock(&gss_auth_hash_lock);
	return gss_auth;
}

static struct gss_auth *
gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct gss_auth *new;

	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
	if (gss_auth != NULL)
		goto out;
	new = gss_create_new(args, clnt);
	if (IS_ERR(new))
		return new;
	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
	if (gss_auth != new)
		gss_destroy(&new->rpc_auth);
out:
	return gss_auth;
}

static struct rpc_auth *
gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);

	while (clnt != clnt->cl_parent) {
		struct rpc_clnt *parent = clnt->cl_parent;
		/* Find the original parent for this transport */
		if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
			break;
		clnt = parent;
	}

	gss_auth = gss_create_hashed(args, clnt);
	if (IS_ERR(gss_auth))
		return ERR_CAST(gss_auth);
	return &gss_auth->rpc_auth;
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
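 * The destroy call is issued asynchronously (RPC_TASK_ASYNC|RPC_TASK_SOFT)
 * and its result is ignored; the credential is torn down either way.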
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
	struct rpc_task *task;

	if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx->gc_acceptor.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	gss_put_auth(gss_auth);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{

	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

static int
gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
{
	return hash_64(from_kuid(&init_user_ns, acred->uid), hashbits);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), gfp)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static char *
gss_stringify_acceptor(struct rpc_cred *cred)
{
	char *string = NULL;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	unsigned int len;
	struct xdr_netobj *acceptor;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx)
		goto out;

	len = ctx->gc_acceptor.len;
	rcu_read_unlock();

	/* no point if there's no string */
	if (!len)
		return NULL;
realloc:
	string = kmalloc(len + 1, GFP_KERNEL);
	if (!string)
		return NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);

	/* did the ctx disappear or was it replaced by one with no acceptor? */
	if (!ctx || !ctx->gc_acceptor.len) {
		kfree(string);
		string = NULL;
		goto out;
	}

	acceptor = &ctx->gc_acceptor;

	/*
	 * Did we find a new acceptor that's longer than the original? Allocate
	 * a longer buffer and try again.
	 */
	if (len < acceptor->len) {
		len = acceptor->len;
		rcu_read_unlock();
		kfree(string);
		goto realloc;
	}

	memcpy(string, acceptor->data, acceptor->len);
	string[acceptor->len] = '\0';
out:
	rcu_read_unlock();
	return string;
}

/*
 * Returns -EACCES if GSS context is NULL or will expire within the
 * timeout (in seconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
	int ret = 0;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx || time_after(timeout, ctx->gc_expiry))
		ret = -EACCES;
	rcu_read_unlock();

	return ret;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor */
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return 0;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *seq = NULL;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav, len;
	u32 maj_stat;
	__be32 *ret = ERR_PTR(-EIO);

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = kmalloc(4, GFP_NOFS);
	if (!seq)
		goto out_bad;
	*seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = seq;
	iov.iov_len = 4;
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	ret = ERR_PTR(-EACCES);
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack.
	 * For now we just calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	kfree(seq);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
		PTR_ERR(ret));
	kfree(seq);
	return ret;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
			  GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}

static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}

static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		      struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		     struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
		task->tk_pid, __func__, status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.hash_cred	= gss_hash_cred,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred,
	.list_pseudoflavors = gss_mech_list_pseudoflavors,
	.info2flavor	= gss_mech_info2flavor,
	.flavor2info	= gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name		= "AUTH_GSS",
	.crdestroy		= gss_destroy_cred,
	.cr_init		= gss_cred_init,
	.crbind			= rpcauth_generic_bind_cred,
	.crmatch		= gss_match,
	.crmarshal		= gss_marshal,
	.crrefresh		= gss_refresh,
	.crvalidate		= gss_validate,
	.crwrap_req		= gss_wrap_req,
	.crunwrap_resp		= gss_unwrap_resp,
	.crkey_timeout		= gss_key_timeout,
	.crstringify_acceptor	= gss_stringify_acceptor,
};

static const struct rpc_credops gss_nullops = {
	.cr_name		= "AUTH_GSS",
	.crdestroy		= gss_destroy_nullcred,
	.crbind			= rpcauth_generic_bind_cred,
	.crmatch		= gss_match,
	.crmarshal		= gss_marshal,
	.crrefresh		= gss_refresh_null,
	.crvalidate		= gss_validate,
	.crwrap_req		= gss_wrap_req,
	.crunwrap_resp		= gss_unwrap_resp,
	.crstringify_acceptor	= gss_stringify_acceptor,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};

static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime where the NFS layer cleans up "
		"prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)