/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
 * (gssapi)
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * being major_status, minor_status, context_handle and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel.  The window size is
 * currently a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 *	uid/gidlist - for determining access rights
 *	mechanism type
 *	mechanism specific information, such as a key
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
 * into replies.
 *
 * Key is context handle (\x if empty) and gss_token.
 * Content is major_status minor_status (integers) context_handle, reply_token.
 *
 */
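/*
 * Illustrative sketch only (all values below are made up): assuming a client
 * sent a 16-byte gss_token with an empty context handle, the upcall request
 * line written by rsi_request() and the downcall reply parsed by rsi_parse()
 * might look roughly like
 *
 *	request: \x \x0123456789abcdef0123456789abcdef
 *	reply:   \x \x0123456789abcdef0123456789abcdef 2147483647 0 0 \x01 \xfeedface
 *
 * i.e. hex-quoted in_handle and in_token as the key, then expiry (seconds
 * since the epoch), major_status, minor_status, out_handle and out_token as
 * the content.  rsi_request() and rsi_parse() below are authoritative.
 */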
static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
{
	return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
}

#define	RSI_HASHBITS	6
#define	RSI_HASHMAX	(1<<RSI_HASHBITS)
#define	RSI_HASHMASK	(RSI_HASHMAX-1)

struct rsi {
	struct cache_head	h;
	struct xdr_netobj	in_handle, in_token;
	struct xdr_netobj	out_handle, out_token;
	int			major_status, minor_status;
};

static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
static struct rsi *rsi_lookup(struct rsi *item, int set);

static void rsi_free(struct rsi *rsii)
{
	kfree(rsii->in_handle.data);
	kfree(rsii->in_token.data);
	kfree(rsii->out_handle.data);
	kfree(rsii->out_token.data);
}

static void rsi_put(struct cache_head *item, struct cache_detail *cd)
{
	struct rsi *rsii = container_of(item, struct rsi, h);
	if (cache_put(item, cd)) {
		rsi_free(rsii);
		kfree(rsii);
	}
}

static inline int rsi_hash(struct rsi *item)
{
	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}

static inline int rsi_match(struct rsi *item, struct rsi *tmp)
{
	return netobj_equal(&item->in_handle, &tmp->in_handle)
		&& netobj_equal(&item->in_token, &tmp->in_token);
}

static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
{
	dst->len = len;
	dst->data = (len ? kmalloc(len, GFP_KERNEL) : NULL);
	if (dst->data)
		memcpy(dst->data, src, len);
	if (len && !dst->data)
		return -ENOMEM;
	return 0;
}

static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
	return dup_to_netobj(dst, src->data, src->len);
}

static inline void rsi_init(struct rsi *new, struct rsi *item)
{
	new->out_handle.data = NULL;
	new->out_handle.len = 0;
	new->out_token.data = NULL;
	new->out_token.len = 0;
	new->in_handle.len = item->in_handle.len;
	item->in_handle.len = 0;
	new->in_token.len = item->in_token.len;
	item->in_token.len = 0;
	new->in_handle.data = item->in_handle.data;
	item->in_handle.data = NULL;
	new->in_token.data = item->in_token.data;
	item->in_token.data = NULL;
}

static inline void rsi_update(struct rsi *new, struct rsi *item)
{
	BUG_ON(new->out_handle.data || new->out_token.data);
	new->out_handle.len = item->out_handle.len;
	item->out_handle.len = 0;
	new->out_token.len = item->out_token.len;
	item->out_token.len = 0;
	new->out_handle.data = item->out_handle.data;
	item->out_handle.data = NULL;
	new->out_token.data = item->out_token.data;
	item->out_token.data = NULL;

	new->major_status = item->major_status;
	new->minor_status = item->minor_status;
}
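/*
 * Note on the helpers above: rsi_init() and rsi_update() transfer ownership
 * of the dynamically allocated netobj buffers from the temporary item into
 * the cached entry (nulling out the source pointers) instead of copying them.
 * rsi_parse() passes a stack-allocated rsi to rsi_lookup() and then calls
 * rsi_free() on it, so only buffers that were not handed over get freed.
 */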
static void rsi_request(struct cache_detail *cd,
			struct cache_head *h,
			char **bpp, int *blen)
{
	struct rsi *rsii = container_of(h, struct rsi, h);

	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
	(*bpp)[-1] = '\n';
}


static int rsi_parse(struct cache_detail *cd,
		     char *mesg, int mlen)
{
	/* context token expiry major minor context token */
	char *buf = mesg;
	char *ep;
	int len;
	struct rsi rsii, *rsip = NULL;
	time_t expiry;
	int status = -EINVAL;

	memset(&rsii, 0, sizeof(rsii));
	/* handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_handle, buf, len))
		goto out;

	/* token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_token, buf, len))
		goto out;

	rsii.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	/* major/minor */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	if (len == 0) {
		goto out;
	} else {
		rsii.major_status = simple_strtoul(buf, &ep, 10);
		if (*ep)
			goto out;
		len = qword_get(&mesg, buf, mlen);
		if (len <= 0)
			goto out;
		rsii.minor_status = simple_strtoul(buf, &ep, 10);
		if (*ep)
			goto out;

		/* out_handle */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		status = -ENOMEM;
		if (dup_to_netobj(&rsii.out_handle, buf, len))
			goto out;

		/* out_token */
		len = qword_get(&mesg, buf, mlen);
		status = -EINVAL;
		if (len < 0)
			goto out;
		status = -ENOMEM;
		if (dup_to_netobj(&rsii.out_token, buf, len))
			goto out;
	}
	rsii.h.expiry_time = expiry;
	rsip = rsi_lookup(&rsii, 1);
	status = 0;
out:
	rsi_free(&rsii);
	if (rsip)
		rsi_put(&rsip->h, &rsi_cache);
	return status;
}

static struct cache_detail rsi_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= RSI_HASHMAX,
	.hash_table	= rsi_table,
	.name		= "auth.rpcsec.init",
	.cache_put	= rsi_put,
	.cache_request	= rsi_request,
	.cache_parse	= rsi_parse,
};

static DefineSimpleCacheLookup(rsi, 0)
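/*
 * DefineSimpleCacheLookup() comes from <linux/sunrpc/cache.h>; as used here
 * it is expected to expand into the rsi_lookup() helper declared above,
 * built on rsi_hash(), rsi_match(), rsi_init() and rsi_update().  Roughly,
 * rsi_lookup(item, 0) only searches the hash table, while rsi_lookup(item, 1)
 * also inserts or updates an entry.  This summary is an assumption based on
 * how rsi_lookup() is called in this file; the macro itself is authoritative.
 */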
/*
 * The rpcsec_context cache is used to store a context that is
 * used in data exchange.
 * The key is a context handle. The content is:
 *  uid, gidlist, mechanism, service-set, mech-specific-data
 */

#define	RSC_HASHBITS	10
#define	RSC_HASHMAX	(1<<RSC_HASHBITS)
#define	RSC_HASHMASK	(RSC_HASHMAX-1)

#define GSS_SEQ_WIN	128

struct gss_svc_seq_data {
	/* highest seq number seen so far: */
	int			sd_max;
	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
	 * sd_win is nonzero iff sequence number i has been seen already: */
	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
	spinlock_t		sd_lock;
};

struct rsc {
	struct cache_head	h;
	struct xdr_netobj	handle;
	struct svc_cred		cred;
	struct gss_svc_seq_data	seqdata;
	struct gss_ctx		*mechctx;
};

static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
static struct rsc *rsc_lookup(struct rsc *item, int set);

static void rsc_free(struct rsc *rsci)
{
	kfree(rsci->handle.data);
	if (rsci->mechctx)
		gss_delete_sec_context(&rsci->mechctx);
	if (rsci->cred.cr_group_info)
		put_group_info(rsci->cred.cr_group_info);
}

static void rsc_put(struct cache_head *item, struct cache_detail *cd)
{
	struct rsc *rsci = container_of(item, struct rsc, h);

	if (cache_put(item, cd)) {
		rsc_free(rsci);
		kfree(rsci);
	}
}

static inline int
rsc_hash(struct rsc *rsci)
{
	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}

static inline int
rsc_match(struct rsc *new, struct rsc *tmp)
{
	return netobj_equal(&new->handle, &tmp->handle);
}

static inline void
rsc_init(struct rsc *new, struct rsc *tmp)
{
	new->handle.len = tmp->handle.len;
	tmp->handle.len = 0;
	new->handle.data = tmp->handle.data;
	tmp->handle.data = NULL;
	new->mechctx = NULL;
	new->cred.cr_group_info = NULL;
}

static inline void
rsc_update(struct rsc *new, struct rsc *tmp)
{
	new->mechctx = tmp->mechctx;
	tmp->mechctx = NULL;
	memset(&new->seqdata, 0, sizeof(new->seqdata));
	spin_lock_init(&new->seqdata.sd_lock);
	new->cred = tmp->cred;
	tmp->cred.cr_group_info = NULL;
}
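/*
 * For illustration only (every value below is made up): a user-space downcall
 * into the rpcsec_context cache, as parsed by rsc_parse() below, might look
 * roughly like
 *
 *	\x0102030405060708 2147483647 500 500 2 100 101 krb5 \x<mech data>
 *
 * i.e. hex-quoted context handle, expiry, uid, gid, the number of extra gids
 * followed by the gids themselves, the mechanism name, and finally the
 * mechanism-specific blob handed to gss_import_sec_context().  A negative
 * entry stops after the expiry (the -ENOENT case in rsc_parse()).
 */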
static int rsc_parse(struct cache_detail *cd,
		     char *mesg, int mlen)
{
	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
	char *buf = mesg;
	int len, rv;
	struct rsc rsci, *rscp = NULL;
	time_t expiry;
	int status = -EINVAL;

	memset(&rsci, 0, sizeof(rsci));
	/* context handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsci.handle, buf, len))
		goto out;

	rsci.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	/* uid, or NEGATIVE */
	rv = get_int(&mesg, &rsci.cred.cr_uid);
	if (rv == -EINVAL)
		goto out;
	if (rv == -ENOENT)
		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
	else {
		int N, i;
		struct gss_api_mech *gm;

		/* gid */
		if (get_int(&mesg, &rsci.cred.cr_gid))
			goto out;

		/* number of additional gid's */
		if (get_int(&mesg, &N))
			goto out;
		status = -ENOMEM;
		rsci.cred.cr_group_info = groups_alloc(N);
		if (rsci.cred.cr_group_info == NULL)
			goto out;

		/* gid's */
		status = -EINVAL;
		for (i = 0; i < N; i++) {
			gid_t gid;
			if (get_int(&mesg, &gid))
				goto out;
			GROUP_AT(rsci.cred.cr_group_info, i) = gid;
		}

		/* mech name */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		gm = gss_mech_get_by_name(buf);
		status = -EOPNOTSUPP;
		if (!gm)
			goto out;

		status = -EINVAL;
		/* mech-specific data: */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0) {
			gss_mech_put(gm);
			goto out;
		}
		if (gss_import_sec_context(buf, len, gm, &rsci.mechctx)) {
			gss_mech_put(gm);
			goto out;
		}
		gss_mech_put(gm);
	}
	rsci.h.expiry_time = expiry;
	rscp = rsc_lookup(&rsci, 1);
	status = 0;
out:
	rsc_free(&rsci);
	if (rscp)
		rsc_put(&rscp->h, &rsc_cache);
	return status;
}

static struct cache_detail rsc_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= RSC_HASHMAX,
	.hash_table	= rsc_table,
	.name		= "auth.rpcsec.context",
	.cache_put	= rsc_put,
	.cache_parse	= rsc_parse,
};

static DefineSimpleCacheLookup(rsc, 0);

static struct rsc *
gss_svc_searchbyctx(struct xdr_netobj *handle)
{
	struct rsc rsci;
	struct rsc *found;

	memset(&rsci, 0, sizeof(rsci));
	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
		return NULL;
	found = rsc_lookup(&rsci, 0);
	rsc_free(&rsci);
	if (!found)
		return NULL;
	if (cache_check(&rsc_cache, &found->h, NULL))
		return NULL;
	return found;
}
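/*
 * gss_svc_searchbyctx() returns an rsc entry with a cache reference held
 * (taken by rsc_lookup() and validated via cache_check()); callers such as
 * svcauth_gss_accept() drop it again with rsc_put() once they are done.
 */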
/* Implements sequence number algorithm as specified in RFC 2203. */
static int
gss_check_seq_num(struct rsc *rsci, int seq_num)
{
	struct gss_svc_seq_data *sd = &rsci->seqdata;

	spin_lock(&sd->sd_lock);
	if (seq_num > sd->sd_max) {
		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
			memset(sd->sd_win, 0, sizeof(sd->sd_win));
			sd->sd_max = seq_num;
		} else while (sd->sd_max < seq_num) {
			sd->sd_max++;
			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
		}
		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
		goto ok;
	} else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
		goto drop;
	}
	/* sd_max - GSS_SEQ_WIN < seq_num <= sd_max */
	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
		goto drop;
ok:
	spin_unlock(&sd->sd_lock);
	return 1;
drop:
	spin_unlock(&sd->sd_lock);
	return 0;
}
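/*
 * Worked example (hypothetical numbers, GSS_SEQ_WIN == 128): if sd_max is 200,
 * then sequence numbers 73..200 fall inside the window and are accepted at
 * most once each (the bitmap catches replays), anything <= 72 (200 - 128) is
 * dropped as too old, 201..327 slides the window forward bit by bit, and
 * 328 or more (>= 200 + 128) simply resets the whole window around the new
 * maximum.  "Dropped" here means the request is discarded without a reply,
 * so a legitimate client will retransmit.
 */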
static inline u32 round_up_to_quad(u32 i)
{
	return (i + 3) & ~3;
}

static inline int
svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
{
	int l;

	if (argv->iov_len < 4)
		return -1;
	o->len = ntohl(svc_getu32(argv));
	l = round_up_to_quad(o->len);
	if (argv->iov_len < l)
		return -1;
	o->data = argv->iov_base;
	argv->iov_base += l;
	argv->iov_len -= l;
	return 0;
}

static inline int
svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
{
	u32 *p;

	if (resv->iov_len + 4 > PAGE_SIZE)
		return -1;
	svc_putu32(resv, htonl(o->len));
	p = resv->iov_base + resv->iov_len;
	resv->iov_len += round_up_to_quad(o->len);
	if (resv->iov_len > PAGE_SIZE)
		return -1;
	memcpy(p, o->data, o->len);
	memset((u8 *)p + o->len, 0, round_up_to_quad(o->len) - o->len);
	return 0;
}

/* Verify the checksum on the header and return SVC_OK on success.
 * Otherwise, return SVC_DROP (in the case of a bad sequence number)
 * or return SVC_DENIED and indicate error in authp.
 */
static int
gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
		  u32 *rpcstart, struct rpc_gss_wire_cred *gc, u32 *authp)
{
	struct gss_ctx		*ctx_id = rsci->mechctx;
	struct xdr_buf		rpchdr;
	struct xdr_netobj	checksum;
	u32			flavor = 0;
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		iov;

	/* data to compute the checksum over: */
	iov.iov_base = rpcstart;
	iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
	xdr_buf_from_iov(&iov, &rpchdr);

	*authp = rpc_autherr_badverf;
	if (argv->iov_len < 4)
		return SVC_DENIED;
	flavor = ntohl(svc_getu32(argv));
	if (flavor != RPC_AUTH_GSS)
		return SVC_DENIED;
	if (svc_safe_getnetobj(argv, &checksum))
		return SVC_DENIED;

	if (rqstp->rq_deferred) /* skip verification of revisited request */
		return SVC_OK;
	if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL)
			!= GSS_S_COMPLETE) {
		*authp = rpcsec_gsserr_credproblem;
		return SVC_DENIED;
	}

	if (gc->gc_seq > MAXSEQ) {
		dprintk("RPC: svcauth_gss: discarding request with large sequence number %d\n",
			gc->gc_seq);
		*authp = rpcsec_gsserr_ctxproblem;
		return SVC_DENIED;
	}
	if (!gss_check_seq_num(rsci, gc->gc_seq)) {
		dprintk("RPC: svcauth_gss: discarding request with old sequence number %d\n",
			gc->gc_seq);
		return SVC_DROP;
	}
	return SVC_OK;
}

static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
	u32			xdr_seq;
	u32			maj_stat;
	struct xdr_buf		verf_data;
	struct xdr_netobj	mic;
	u32			*p;
	struct kvec		iov;

	svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
	xdr_seq = htonl(seq);

	iov.iov_base = &xdr_seq;
	iov.iov_len = sizeof(xdr_seq);
	xdr_buf_from_iov(&iov, &verf_data);
	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic);
	if (maj_stat != GSS_S_COMPLETE)
		return -1;
	*p++ = htonl(mic.len);
	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
	p += XDR_QUADLEN(mic.len);
	if (!xdr_ressize_check(rqstp, p))
		return -1;
	return 0;
}
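/*
 * For reference (a reading of the two functions above, not a separate spec):
 * the RPCSEC_GSS verifier is flavor RPC_AUTH_GSS followed by an opaque MIC.
 * On a request the MIC covers the RPC header from the xid up to and including
 * the credential; gss_verify_header() rebuilds that region from rpcstart and
 * checks it.  On a reply, gss_write_verf() emits the same flavor/length/MIC
 * layout, but the MIC covers only the XDR-encoded sequence number (the
 * sequence window for INIT replies, the request's gc_seq otherwise).
 */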
struct gss_domain {
	struct auth_domain	h;
	u32			pseudoflavor;
};

static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
	char *name;

	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
	if (!name)
		return NULL;
	return auth_domain_find(name);
}

int
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char *name)
{
	struct gss_domain	*new;
	struct auth_domain	*test;
	int			stat = -ENOMEM;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out;
	cache_init(&new->h.h);
	new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
	if (!new->h.name)
		goto out_free_dom;
	strcpy(new->h.name, name);
	new->h.flavour = RPC_AUTH_GSS;
	new->pseudoflavor = pseudoflavor;
	new->h.h.expiry_time = NEVER;

	test = auth_domain_lookup(&new->h, 1);
	if (test == &new->h) {
		BUG_ON(atomic_dec_and_test(&new->h.h.refcnt));
	} else { /* XXX Duplicate registration? */
		auth_domain_put(&new->h);
		goto out;
	}
	return 0;

out_free_dom:
	kfree(new);
out:
	return stat;
}

EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);

static inline int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

/* It would be nice if this bit of code could be shared with the client.
 * Obstacles:
 *	The client shouldn't malloc(), would have to pass in own memory.
 *	The server uses base of head iovec as read pointer, while the
 *	client uses separate pointer.
 */
static int
unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
	int stat = -EINVAL;
	u32 integ_len, maj_stat;
	struct xdr_netobj mic;
	struct xdr_buf integ_buf;

	integ_len = ntohl(svc_getu32(&buf->head[0]));
	if (integ_len & 3)
		goto out;
	if (integ_len > buf->len)
		goto out;
	if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
		BUG();
	/* copy out mic... */
	if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
		BUG();
	if (mic.len > RPC_MAX_AUTH_SIZE)
		goto out;
	mic.data = kmalloc(mic.len, GFP_KERNEL);
	if (!mic.data)
		goto out;
	if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
		goto out;
	maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL);
	if (maj_stat != GSS_S_COMPLETE)
		goto out;
	if (ntohl(svc_getu32(&buf->head[0])) != seq)
		goto out;
	stat = 0;
out:
	return stat;
}
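/*
 * Layout consumed by unwrap_integ_data() above (RFC 2203 integrity service):
 * the RPC argument body arrives as
 *
 *	integ_len | databody_integ (= seq_num | procedure arguments) | MIC
 *
 * The function strips integ_len, verifies the MIC over databody_integ, and
 * checks that the embedded sequence number matches the one in the credential,
 * leaving the plain procedure arguments at the head of the buffer for normal
 * dispatch.
 */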
struct gss_svc_data {
	/* decoded gss client cred: */
	struct rpc_gss_wire_cred	clcred;
	/* pointer to the beginning of the procedure-specific results,
	 * which may be encrypted/checksummed in svcauth_gss_release: */
	u32				*body_start;
	struct rsc			*rsci;
};

static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	struct rsc *rsci = svcdata->rsci;
	struct rpc_gss_wire_cred *gc = &svcdata->clcred;

	rqstp->rq_client = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
	if (rqstp->rq_client == NULL)
		return SVC_DENIED;
	return SVC_OK;
}
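/*
 * svcauth_gss_set_client() maps the established context's mechanism plus the
 * requested service onto an auth_domain name and denies the request if no
 * such domain exists; in this file domains are created up front through
 * svcauth_gss_register_pseudoflavor(), so an unregistered pseudoflavor never
 * reaches the server procedures.
 */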
/*
 * Accept an rpcsec packet.
 * If context establishment, punt to user space
 * If data exchange, verify/decrypt
 * If context destruction, handle here
 * In the context establishment and destruction case we encode
 * response here and return SVC_COMPLETE.
 */
static int
svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	u32		crlen;
	struct xdr_netobj tmpobj;
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc;
	struct rsc	*rsci = NULL;
	struct rsi	*rsip, rsikey;
	u32		*rpcstart;
	u32		*reject_stat = resv->iov_base + resv->iov_len;
	int		ret;

	dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n", argv->iov_len);

	*authp = rpc_autherr_badcred;
	if (!svcdata)
		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
	if (!svcdata)
		goto auth_err;
	rqstp->rq_auth_data = svcdata;
	svcdata->body_start = NULL;
	svcdata->rsci = NULL;
	gc = &svcdata->clcred;

	/* start of rpc packet is 7 u32's back from here:
	 * xid direction rpcversion prog vers proc flavour
	 */
	rpcstart = argv->iov_base;
	rpcstart -= 7;

	/* credential is:
	 *   version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
	 * at least 5 u32s, and is preceded by length, so that makes 6.
	 */

	if (argv->iov_len < 5 * 4)
		goto auth_err;
	crlen = ntohl(svc_getu32(argv));
	if (ntohl(svc_getu32(argv)) != RPC_GSS_VERSION)
		goto auth_err;
	gc->gc_proc = ntohl(svc_getu32(argv));
	gc->gc_seq = ntohl(svc_getu32(argv));
	gc->gc_svc = ntohl(svc_getu32(argv));
	if (svc_safe_getnetobj(argv, &gc->gc_ctx))
		goto auth_err;
	if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
		goto auth_err;

	if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
		goto auth_err;

	/*
	 * We've successfully parsed the credential. Let's check out the
	 * verifier. An AUTH_NULL verifier is allowed (and required) for
	 * INIT and CONTINUE_INIT requests. AUTH_RPCSEC_GSS is required for
	 * PROC_DATA and PROC_DESTROY.
	 *
	 * AUTH_NULL verifier is 0 (AUTH_NULL), 0 (length).
	 * AUTH_RPCSEC_GSS verifier is:
	 *   6 (AUTH_RPCSEC_GSS), length, checksum.
	 * checksum is calculated over rpcheader from xid up to here.
	 */
	*authp = rpc_autherr_badverf;
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_INIT:
	case RPC_GSS_PROC_CONTINUE_INIT:
		if (argv->iov_len < 2 * 4)
			goto auth_err;
		if (ntohl(svc_getu32(argv)) != RPC_AUTH_NULL)
			goto auth_err;
		if (ntohl(svc_getu32(argv)) != 0)
			goto auth_err;
		break;
	case RPC_GSS_PROC_DATA:
	case RPC_GSS_PROC_DESTROY:
		*authp = rpcsec_gsserr_credproblem;
		rsci = gss_svc_searchbyctx(&gc->gc_ctx);
		if (!rsci)
			goto auth_err;
		switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) {
		case SVC_OK:
			break;
		case SVC_DENIED:
			goto auth_err;
		case SVC_DROP:
			goto drop;
		}
		break;
	default:
		*authp = rpc_autherr_rejectedcred;
		goto auth_err;
	}
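	/*
	 * Sketch of the INIT/CONTINUE_INIT handling below (the deferral
	 * behaviour of cache_check() is a reading of the sunrpc cache layer,
	 * not something spelled out here): the client's context handle and
	 * token become the rsi cache key; cache_check() with rq_chandle
	 * triggers the upcall to user-space and typically defers the request
	 * until the reply is written back, after which the request is
	 * revisited.  Once the entry is valid, the reply carries a verifier
	 * whose MIC covers GSS_SEQ_WIN, the RPC accept status, then
	 * out_handle, major_status, minor_status, the sequence window and
	 * out_token.
	 */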
	/* now act upon the command: */
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_INIT:
	case RPC_GSS_PROC_CONTINUE_INIT:
		*authp = rpc_autherr_badcred;
		if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
			goto auth_err;
		memset(&rsikey, 0, sizeof(rsikey));
		if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
			goto drop;
		*authp = rpc_autherr_badverf;
		if (svc_safe_getnetobj(argv, &tmpobj)) {
			kfree(rsikey.in_handle.data);
			goto auth_err;
		}
		if (dup_netobj(&rsikey.in_token, &tmpobj)) {
			kfree(rsikey.in_handle.data);
			goto drop;
		}

		rsip = rsi_lookup(&rsikey, 0);
		rsi_free(&rsikey);
		if (!rsip) {
			goto drop;
		}
		switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
		case -EAGAIN:
			goto drop;
		case -ENOENT:
			goto drop;
		case 0:
			rsci = gss_svc_searchbyctx(&rsip->out_handle);
			if (!rsci) {
				goto drop;
			}
			if (gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN))
				goto drop;
			if (resv->iov_len + 4 > PAGE_SIZE)
				goto drop;
			svc_putu32(resv, rpc_success);
			if (svc_safe_putnetobj(resv, &rsip->out_handle))
				goto drop;
			if (resv->iov_len + 3 * 4 > PAGE_SIZE)
				goto drop;
			svc_putu32(resv, htonl(rsip->major_status));
			svc_putu32(resv, htonl(rsip->minor_status));
			svc_putu32(resv, htonl(GSS_SEQ_WIN));
			if (svc_safe_putnetobj(resv, &rsip->out_token))
				goto drop;
			rqstp->rq_client = NULL;
		}
		goto complete;
	case RPC_GSS_PROC_DESTROY:
		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
		if (resv->iov_len + 4 > PAGE_SIZE)
			goto drop;
		svc_putu32(resv, rpc_success);
		goto complete;
	case RPC_GSS_PROC_DATA:
		*authp = rpcsec_gsserr_ctxproblem;
		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		rqstp->rq_cred = rsci->cred;
		get_group_info(rsci->cred.cr_group_info);
		*authp = rpc_autherr_badcred;
		switch (gc->gc_svc) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			if (unwrap_integ_data(&rqstp->rq_arg,
					gc->gc_seq, rsci->mechctx))
				goto auth_err;
			/* placeholders for length and seq. number: */
			svcdata->body_start = resv->iov_base + resv->iov_len;
			svc_putu32(resv, 0);
			svc_putu32(resv, 0);
			break;
		case RPC_GSS_SVC_PRIVACY:
			/* currently unsupported */
		default:
			goto auth_err;
		}
		svcdata->rsci = rsci;
		cache_get(&rsci->h);
		ret = SVC_OK;
		goto out;
	}
auth_err:
	/* Restore write pointer to original value: */
	xdr_ressize_check(rqstp, reject_stat);
	ret = SVC_DENIED;
	goto out;
complete:
	ret = SVC_COMPLETE;
	goto out;
drop:
	ret = SVC_DROP;
out:
	if (rsci)
		rsc_put(&rsci->h, &rsc_cache);
	return ret;
}

static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	struct kvec *resv;
	u32 *p;
	int integ_offset, integ_len;
	int stat = -EINVAL;

	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;
	/* Release can be called twice, but we only wrap once. */
	if (gsd->body_start == NULL)
		goto out;
	/* normally not set till svc_send, but we need it here: */
	resbuf->len = resbuf->head[0].iov_len
		+ resbuf->page_len + resbuf->tail[0].iov_len;
	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		p = gsd->body_start;
		gsd->body_start = NULL;
		/* move accept_stat to right place: */
		memcpy(p, p + 2, 4);
		/* don't wrap in failure case: */
		/* Note: counting on not getting here if call was not even
		 * accepted! */
		if (*p != rpc_success) {
			resbuf->head[0].iov_len -= 2 * 4;
			goto out;
		}
		p++;
		integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
		integ_len = resbuf->len - integ_offset;
		BUG_ON(integ_len % 4);
		*p++ = htonl(integ_len);
		*p++ = htonl(gc->gc_seq);
		if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
					integ_len))
			BUG();
		if (resbuf->page_len == 0
			&& resbuf->tail[0].iov_len + RPC_MAX_AUTH_SIZE
			< PAGE_SIZE) {
			BUG_ON(resbuf->tail[0].iov_len);
			/* Use head for everything */
			resv = &resbuf->head[0];
		} else if (resbuf->tail[0].iov_base == NULL) {
			/* copied from nfsd4_encode_read */
			svc_take_page(rqstp);
			resbuf->tail[0].iov_base =
				page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
			rqstp->rq_restailpage = rqstp->rq_resused-1;
			resbuf->tail[0].iov_len = 0;
			resv = &resbuf->tail[0];
		} else {
			resv = &resbuf->tail[0];
		}
		mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
		if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
			goto out_err;
		svc_putu32(resv, htonl(mic.len));
		memset(mic.data + mic.len, 0,
				round_up_to_quad(mic.len) - mic.len);
		resv->iov_len += XDR_QUADLEN(mic.len) << 2;
		/* not strictly required: */
		resbuf->len += XDR_QUADLEN(mic.len) << 2;
		BUG_ON(resv->iov_len > PAGE_SIZE);
		break;
	case RPC_GSS_SVC_PRIVACY:
	default:
		goto out_err;
	}

out:
	stat = 0;
out_err:
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	if (gsd->rsci)
		rsc_put(&gsd->rsci->h, &rsc_cache);
	gsd->rsci = NULL;

	return stat;
}
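/*
 * Note on the integrity reply path: for RPC_GSS_SVC_INTEGRITY,
 * svcauth_gss_accept() reserved two placeholder words at body_start;
 * svcauth_gss_release() above shuffles the accept status into place, fills
 * the placeholders with the length of the protected region and the request's
 * sequence number, and appends a MIC over that region, mirroring the layout
 * unwrap_integ_data() expects on the way in.
 */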
static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);

	kfree(dom->name);
	kfree(gd);
}

static struct auth_ops svcauthops_gss = {
	.name		= "rpcsec_gss",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_GSS,
	.accept		= svcauth_gss_accept,
	.release	= svcauth_gss_release,
	.domain_release	= svcauth_gss_domain_release,
	.set_client	= svcauth_gss_set_client,
};

int
gss_svc_init(void)
{
	int rv = svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
	if (rv == 0) {
		cache_register(&rsc_cache);
		cache_register(&rsi_cache);
	}
	return rv;
}

void
gss_svc_shutdown(void)
{
	if (cache_unregister(&rsc_cache))
		printk(KERN_ERR "auth_rpcgss: failed to unregister rsc cache\n");
	if (cache_unregister(&rsi_cache))
		printk(KERN_ERR "auth_rpcgss: failed to unregister rsi cache\n");
	svc_auth_unregister(RPC_AUTH_GSS);
}
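/*
 * Usage sketch (the caller below is assumed, not part of this file): the
 * module providing RPCSEC_GSS support is expected to call gss_svc_init()
 * from its init path and gss_svc_shutdown() on exit, e.g. roughly:
 *
 *	static int __init init_rpcsec_gss(void)
 *	{
 *		return gss_svc_init();
 *	}
 *
 *	static void __exit exit_rpcsec_gss(void)
 *	{
 *		gss_svc_shutdown();
 *	}
 *
 * gss_svc_init() registers the RPC_AUTH_GSS flavour before the two caches,
 * and gss_svc_shutdown() tears things down in the reverse order.
 */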