/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
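
/*
 * With the 0644 mode above the parameter is also writable at run time;
 * assuming the usual module parameter paths, an administrator can pick a
 * mapping mode before any pooled service is created, e.g.
 *
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * or boot with sunrpc.pool_mode=percpu when sunrpc is built in.  Once a
 * pooled service exists, param_set_pool_mode() above returns -EBUSY.
 */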

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
};


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}


/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		node_to_cpumask_ptr(nodecpumask, node);
		set_cpus_allowed_ptr(task, nodecpumask);
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

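/*
 * For example, with SVC_POOL_PERCPU and cpus 0-3 online, a request
 * arriving on cpu 2 is queued to serv->sv_pools[2]; with SVC_POOL_PERNODE
 * it goes to the pool of cpu_to_node(2).  In the global (or uninitialised)
 * case pidx stays 0, and the modulo above guards against a stale map.
 */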

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	svc_unregister(serv);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
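
/*
 * A minimal sketch of the intended lifecycle, assuming a pooled service
 * such as nfsd (the real callers live elsewhere in the tree):
 *
 *	serv = svc_create_pooled(&prog, bufsize, shutdown_fn,
 *				 my_thread_fn, THIS_MODULE);
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *	...
 *	svc_destroy(serv);
 *
 * svc_destroy() drops the reference taken at creation time; each server
 * thread drops its own reference via svc_exit_thread() below.
 */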

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
					(const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, protocol, port);

	return error;
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
					(const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(program, version,
						protocol, port);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
		 const unsigned short proto, const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP?  "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
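
/*
 * For instance, when a transport for the NFS service is set up on TCP
 * port 2049, the transport code ends up calling roughly
 * svc_register(serv, PF_INET, IPPROTO_TCP, 2049), which walks every
 * non-hidden program/version pair in serv and advertises it to rpcbind.
 */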

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			__svc_unregister(progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int	r;
	char	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

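	/*
	 * The shortest well-formed call message carries six 32-bit words
	 * before the credential: xid, direction, RPC version, program,
	 * version and procedure; anything shorter is dropped.
	 */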
	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	progp = serv->sv_program;

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base +resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

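	/*
	 * Two dispatch paths follow: a program may install its own
	 * vs_dispatch (nfsd does, so it can handle encoding itself);
	 * otherwise the generic path below decodes the arguments, calls
	 * pc_func and encodes the result.
	 */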
	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp)  - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_dir:
	svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);