/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song       <dugsong@monkey.org>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct rpc_pipe *pipe[2];
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = rpc_net_ns(gss_msg->auth->client);
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);
	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
}

static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt,
				const char *service_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   mech->gm_name,
				   from_kuid(&init_user_ns, gss_msg->uid));
	p += gss_msg->msg.len;
	if (clnt->cl_principal) {
		len = sprintf(p, "target=%s ", clnt->cl_principal);
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = sprintf(p, "service=%s ", service_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}
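
/*
 * Illustrative sketch of what gss_encode_v1_msg() produces (values are
 * made up, not taken from a real trace): with the krb5 mechanism, uid 1000,
 * a target principal and an explicit service name, the pipe message is a
 * single text line such as
 *
 *	"mech=krb5 uid=1000 target=nfs@server service=nfs enctypes=... \n"
 *
 * where the enctypes list, if present, is copied verbatim from
 * mech->gm_upcall_enctypes.
 */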

static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt,
				const char *service_name)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (sn->pipe_version == 0)
		gss_encode_v0_msg(gss_msg);
	else /* pipe_version == 1 */
		gss_encode_v1_msg(gss_msg, clnt, service_name);
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version(rpc_net_ns(clnt));
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->pipe = gss_auth->pipe[vers];
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	gss_encode_msg(gss_msg, clnt, service_name);
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
				"Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = rpc_net_ns(gss_auth->client);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	unsigned long timeout;
	DEFINE_WAIT(wait);
	int err;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	err = 0;
	/* Default timeout is 15s unless we know that gssd is not running */
	timeout = 15 * HZ;
	if (!sn->gssd_running)
		timeout = HZ >> 2;
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, timeout);
		if (sn->pipe_version < 0) {
			if (err == 0)
				sn->gssd_running = 0;
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}
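	/*
	 * The uid parsed above identifies which pending upcall this downcall
	 * answers; the rest of the buffer is consumed by gss_fill_context()
	 * below (context lifetime, sequence window, the opaque wire handle
	 * and the mechanism-specific security context blob).
	 */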

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			BUG();
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %Zd\n", __func__, err);
	return err;
}

static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;

}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

static void gss_pipes_dentries_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	if (gss_auth->pipe[0]->dentry)
		rpc_unlink(gss_auth->pipe[0]->dentry);
	if (gss_auth->pipe[1]->dentry)
		rpc_unlink(gss_auth->pipe[1]->dentry);
}

static int gss_pipes_dentries_create(struct rpc_auth *auth)
{
	int err;
	struct gss_auth *gss_auth;
	struct rpc_clnt *clnt;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	clnt = gss_auth->client;

	gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      "gssd",
						      clnt, gss_auth->pipe[1]);
	if (IS_ERR(gss_auth->pipe[1]->dentry))
		return PTR_ERR(gss_auth->pipe[1]->dentry);
	gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      gss_auth->mech->gm_name,
						      clnt, gss_auth->pipe[0]);
	if (IS_ERR(gss_auth->pipe[0]->dentry)) {
		err = PTR_ERR(gss_auth->pipe[0]->dentry);
		goto err_unlink_pipe_1;
	}
	return 0;

err_unlink_pipe_1:
	rpc_unlink(gss_auth->pipe[1]->dentry);
	return err;
}

static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt,
					   struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			gss_pipes_dentries_destroy(auth);
		rpc_put_sb_net(net);
	}
}

static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt,
					 struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;
	int err = 0;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			err = gss_pipes_dentries_create(auth);
		rpc_put_sb_net(net);
	}
	return err;
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[1])) {
		err = PTR_ERR(gss_auth->pipe[1]);
		goto err_put_mech;
	}

	gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[0])) {
		err = PTR_ERR(gss_auth->pipe[0]);
		goto err_destroy_pipe_1;
	}
	err = gss_pipes_dentries_create_net(clnt, auth);
	if (err)
		goto err_destroy_pipe_0;
	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipes;

	return auth;
err_unlink_pipes:
	gss_pipes_dentries_destroy_net(clnt, auth);
err_destroy_pipe_0:
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
err_destroy_pipe_1:
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipes_dentries_destroy_net(gss_auth->client, &gss_auth->rpc_auth);
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
	gss_mech_put(gss_auth->mech);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
		auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them.
 */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{

	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		return strcmp(acred->principal, gss_cred->gc_principal) == 0;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	return uid_eq(rc->cr_uid, acred->uid);
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}
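
/*
 * For reference, the credential emitted by gss_marshal() above lays out on
 * the wire as: flavor (RPC_AUTH_GSS), credential length, then the body
 * { RPC_GSS_VERSION, gc_proc, rq_seqno, gc_service, wire context handle },
 * followed by a verifier whose flavor is again RPC_AUTH_GSS and whose body
 * is a MIC computed over the request from the xid through the credential.
 */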

static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav,len;
	u32 maj_stat;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed.\n", task->tk_pid, __func__);
	return NULL;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}
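
/*
 * The integrity-protected request body built above follows the RPCSEC_GSS
 * databody_integ layout from RFC 2203: a four-byte length, the sequence
 * number, the encoded procedure arguments, and then the MIC over that span
 * appended as an opaque blob. gss_unwrap_resp_integ() below expects the
 * server's reply to mirror the same layout.
 */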

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}

static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}

static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
		task->tk_pid, __func__, status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred,
	.pipes_create	= gss_pipes_dentries_create,
	.pipes_destroy	= gss_pipes_dentries_destroy,
	.list_pseudoflavors = gss_mech_list_pseudoflavors,
	.info2flavor	= gss_mech_info2flavor,
	.flavor2info	= gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_nullcred,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};

static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)