// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
        .mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip) {
        case SVC_POOL_AUTO:
                return sysfs_emit(buf, "auto\n");
        case SVC_POOL_GLOBAL:
                return sysfs_emit(buf, "global\n");
        case SVC_POOL_PERCPU:
                return sysfs_emit(buf, "percpu\n");
        case SVC_POOL_PERNODE:
                return sysfs_emit(buf, "pernode\n");
        default:
                return sysfs_emit(buf, "%d\n", *ip);
        }
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                  &svc_pool_map.mode, 0644);
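/*
 * Example (illustrative, not part of this file): because the parameter
 * above is registered with mode 0644, the pool mode can be selected
 * before any pooled service is created, either on the kernel command
 * line ("sunrpc.pool_mode=pernode") or via sysfs:
 *
 *      echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once a pooled service holds a reference (svc_pool_map.count != 0),
 * param_set_pool_mode() returns -EBUSY, so the write fails until all
 * such services have been destroyed.
 */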
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (nr_online_nodes > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = first_online_node;
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons. In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
        m->to_pool = NULL;
fail:
        return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx >= maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}
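/*
 * Worked example (hypothetical topology): on a two-node machine with
 * CPUs 0-1 on node 0 and CPUs 2-3 on node 1, SVC_POOL_PERNODE builds
 *
 *      to_pool[] = { 0, 1 }    (node -> pool)
 *      pool_to[] = { 0, 1 }    (pool -> node)
 *
 * so a request arriving on CPU 3 resolves cpu_to_node(3) == 1 and is
 * steered to pool 1, while SVC_POOL_PERCPU on the same box would
 * create four pools, one per online CPU.
 */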
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                WARN_ON_ONCE(m->npools <= 1);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools <= 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        if (npools == 1)
                /* service is unpooled, so doesn't hold a reference */
                m->count--;

        mutex_unlock(&svc_pool_map_mutex);
        return npools;
}

/*
 * Drop a reference to the global map of cpus to pools, if
 * pools were in use, i.e. if npools > 1.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(int npools)
{
        struct svc_pool_map *m = &svc_pool_map;

        if (npools <= 1)
                return;
        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                kfree(m->to_pool);
                m->to_pool = NULL;
                kfree(m->pool_to);
                m->pool_to = NULL;
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}

static int svc_pool_map_get_node(unsigned int pidx)
{
        const struct svc_pool_map *m = &svc_pool_map;

        if (m->count) {
                if (m->mode == SVC_POOL_PERCPU)
                        return cpu_to_node(m->pool_to[pidx]);
                if (m->mode == SVC_POOL_PERNODE)
                        return m->pool_to[pidx];
        }
        return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int node = m->pool_to[pidx];

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        WARN_ON_ONCE(m->count == 0);
        if (m->count == 0)
                return;

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                set_cpus_allowed_ptr(task, cpumask_of(node));
                break;
        case SVC_POOL_PERNODE:
                set_cpus_allowed_ptr(task, cpumask_of_node(node));
                break;
        }
}
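/*
 * Note on the reference protocol above: a service that ends up with a
 * single pool drops its map reference immediately in svc_pool_map_get(),
 * which is why svc_pool_map_put() is a no-op for npools <= 1 and why
 * svc_destroy() can pass sv_nrpools unconditionally.
 */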
/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
        struct svc_pool_map *m = &svc_pool_map;
        int cpu = raw_smp_processor_id();
        unsigned int pidx = 0;

        if (serv->sv_nrpools <= 1)
                return serv->sv_pools;

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                pidx = m->to_pool[cpu];
                break;
        case SVC_POOL_PERNODE:
                pidx = m->to_pool[cpu_to_node(cpu)];
                break;
        }

        return &serv->sv_pools[pidx % serv->sv_nrpools];
}

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
        int err;

        err = rpcb_create_local(net);
        if (err)
                return err;

        /* Remove any stale portmap registrations */
        svc_unregister(serv, net);
        return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
        svc_unregister(serv, net);
        rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
        struct svc_program *progp;
        unsigned int i;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (!progp->pg_vers[i]->vs_hidden)
                                return 1;
                }
        }

        return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
        if (!svc_uses_rpcbind(serv))
                return 0;
        return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
        lwq_init(&serv->sv_cb_list);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif
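/*
 * Sizing example for __svc_create() below (illustrative, assuming a
 * 4 KB PAGE_SIZE and a 1 MB payload limit):
 *
 *      sv_max_payload = 1048576
 *      sv_max_mesg    = roundup(1048576 + 4096, 4096) = 1052672
 *
 * i.e. one extra page beyond the payload to cover RPC and transport
 * headers.
 */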
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             int (*threadfn)(void *data))
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        serv = kzalloc(sizeof(*serv), GFP_KERNEL);
        if (!serv)
                return NULL;
        serv->sv_name = prog->pg_name;
        serv->sv_program = prog;
        kref_init(&serv->sv_refcnt);
        serv->sv_stats = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_threadfn = threadfn;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        timer_setup(&serv->sv_temptimer, NULL, 0);
        spin_lock_init(&serv->sv_lock);

        __svc_init_bc(serv);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                        i, serv->sv_name);

                pool->sp_id = i;
                lwq_init(&pool->sp_xprts);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                init_llist_head(&pool->sp_idle_threads);

                percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
                percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
                percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
        }

        return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
                            int (*threadfn)(void *data))
{
        return __svc_create(prog, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
                                   unsigned int bufsize,
                                   int (*threadfn)(void *data))
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, threadfn);
        if (!serv)
                goto out_err;
        return serv;
out_err:
        svc_pool_map_put(npools);
        return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
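/*
 * Usage sketch (hypothetical caller; the real ones live in fs/nfsd and
 * fs/lockd): a server typically creates a pooled service and then
 * sizes its thread count:
 *
 *      serv = svc_create_pooled(&my_program, 1024 * 1024, my_threadfn);
 *      if (!serv)
 *              return -ENOMEM;
 *      err = svc_set_num_threads(serv, NULL, 8);
 *
 * "my_program" and "my_threadfn" are placeholders for the caller's
 * svc_program and per-thread service loop.
 */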
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct kref *ref)
{
        struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
        unsigned int i;

        dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
        timer_shutdown_sync(&serv->sv_temptimer);

        /*
         * Remaining transports at this point are not expected.
         */
        WARN_ONCE(!list_empty(&serv->sv_permsocks),
                  "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
        WARN_ONCE(!list_empty(&serv->sv_tempsocks),
                  "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);

        cache_clean_deferred(serv);

        svc_pool_map_put(serv->sv_nrpools);

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                percpu_counter_destroy(&pool->sp_messages_arrived);
                percpu_counter_destroy(&pool->sp_sockets_queued);
                percpu_counter_destroy(&pool->sp_threads_woken);
        }
        kfree(serv->sv_pools);
        kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

static bool
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
        unsigned long pages, ret;

        /* bc_xprt uses fore channel allocated buffers */
        if (svc_is_backchannel(rqstp))
                return true;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume each is at most one page
                                       */
        WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
        if (pages > RPCSVC_MAXPAGES)
                pages = RPCSVC_MAXPAGES;

        ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
                                          rqstp->rq_pages);
        return ret == pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
        if (!rqstp)
                return rqstp;

        folio_batch_init(&rqstp->rq_fbatch);

        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
        if (!rqstp->rq_scratch_page)
                goto out_enomem;

        rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_argp)
                goto out_enomem;

        rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_resp)
                goto out_enomem;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
                goto out_enomem;

        return rqstp;
out_enomem:
        svc_rqst_free(rqstp);
        return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = svc_rqst_alloc(serv, pool, node);
        if (!rqstp)
                return ERR_PTR(-ENOMEM);

        svc_get(serv);
        spin_lock_bh(&serv->sv_lock);
        serv->sv_nrthreads += 1;
        spin_unlock_bh(&serv->sv_lock);

        atomic_inc(&pool->sp_nrthreads);

        /* Protected by whatever lock the service uses when calling
         * svc_set_num_threads()
         */
        list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);

        return rqstp;
}
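/*
 * Worked example for svc_init_buffer() above (illustrative, continuing
 * the 1 MB / 4 KB assumption): with sv_max_mesg = 1052672,
 * pages = 1052672 / 4096 + 1 = 258. The extra page exists because
 * rq_pages holds both the request and the reply at once.
 */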
/**
 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
 * @pool: service thread pool
 *
 * Can be called from soft IRQ or process context. Finding an idle
 * service thread and marking it BUSY is atomic with respect to
 * other calls to svc_pool_wake_idle_thread().
 */
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
        struct svc_rqst *rqstp;
        struct llist_node *ln;

        rcu_read_lock();
        ln = READ_ONCE(pool->sp_idle_threads.first);
        if (ln) {
                rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
                WRITE_ONCE(rqstp->rq_qtime, ktime_get());
                if (!task_is_running(rqstp->rq_task)) {
                        wake_up_process(rqstp->rq_task);
                        trace_svc_wake_up(rqstp->rq_task->pid);
                        percpu_counter_inc(&pool->sp_threads_woken);
                }
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);

static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

static struct svc_pool *
svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
                unsigned int *state)
{
        struct svc_pool *pool;
        unsigned int i;

retry:
        pool = target_pool;

        if (pool != NULL) {
                if (atomic_inc_not_zero(&pool->sp_nrthreads))
                        goto found_pool;
                return NULL;
        } else {
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        if (atomic_inc_not_zero(&pool->sp_nrthreads))
                                goto found_pool;
                }
                return NULL;
        }

found_pool:
        set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
        set_bit(SP_NEED_VICTIM, &pool->sp_flags);
        if (!atomic_dec_and_test(&pool->sp_nrthreads))
                return pool;
        /* Nothing left in this pool any more */
        clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
        clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
        goto retry;
}

static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct svc_rqst *rqstp;
        struct task_struct *task;
        struct svc_pool *chosen_pool;
        unsigned int state = serv->sv_nrthreads - 1;
        int node;

        do {
                nrservs--;
                chosen_pool = svc_pool_next(serv, pool, &state);
                node = svc_pool_map_get_node(chosen_pool->sp_id);

                rqstp = svc_prepare_thread(serv, chosen_pool, node);
                if (IS_ERR(rqstp))
                        return PTR_ERR(rqstp);
                task = kthread_create_on_node(serv->sv_threadfn, rqstp,
                                              node, "%s", serv->sv_name);
                if (IS_ERR(task)) {
                        svc_exit_thread(rqstp);
                        return PTR_ERR(task);
                }

                rqstp->rq_task = task;
                if (serv->sv_nrpools > 1)
                        svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

                svc_sock_update_bufs(serv);
                wake_up_process(task);
        } while (nrservs > 0);

        return 0;
}

static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        unsigned int state = serv->sv_nrthreads - 1;
        struct svc_pool *victim;

        do {
                victim = svc_pool_victim(serv, pool, &state);
                if (!victim)
                        break;
                svc_pool_wake_idle_thread(victim);
                wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
                            TASK_IDLE);
                nrservs++;
        } while (nrservs < 0);
        return 0;
}
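/*
 * Note on the shutdown handshake above: svc_pool_victim() raises
 * SP_NEED_VICTIM so that exactly one thread in the chosen pool
 * volunteers to exit; that thread clears SP_VICTIM_REMAINS from
 * svc_exit_thread(), which is the bit svc_stop_kthreads() sleeps on
 * in wait_on_bit().
 */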
/**
 * svc_set_num_threads - adjust number of threads per RPC service
 * @serv: RPC service to adjust
 * @pool: Specific pool from which to choose threads, or NULL
 * @nrservs: New number of threads for @serv (0 or less means kill all threads)
 *
 * Create or destroy threads to make the number of threads for @serv the
 * given number. If @pool is non-NULL, change only threads in that pool;
 * otherwise, round-robin between all pools for @serv. @serv's
 * sv_nrthreads is adjusted for each thread created or destroyed.
 *
 * Caller must ensure mutual exclusion between this and server startup or
 * shutdown.
 *
 * Returns zero on success or a negative errno if an error occurred while
 * starting a thread.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        if (!pool)
                nrservs -= serv->sv_nrthreads;
        else
                nrservs -= atomic_read(&pool->sp_nrthreads);

        if (nrservs > 0)
                return svc_start_kthreads(serv, pool, nrservs);
        if (nrservs < 0)
                return svc_stop_kthreads(serv, pool, nrservs);
        return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 *
 * Return values:
 *   %true: page replaced
 *   %false: array bounds checking failed
 */
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
        struct page **begin = rqstp->rq_pages;
        struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];

        if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
                trace_svc_replace_page_err(rqstp);
                return false;
        }

        if (*rqstp->rq_next_page) {
                if (!folio_batch_add(&rqstp->rq_fbatch,
                                     page_folio(*rqstp->rq_next_page)))
                        __folio_batch_release(&rqstp->rq_fbatch);
        }

        get_page(page);
        *(rqstp->rq_next_page++) = page;
        return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * @rqstp: RPC transaction context
 *
 * Release response pages that might still be in flight after
 * svc_send, and any spliced filesystem-owned pages.
 */
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
        int i, count = rqstp->rq_next_page - rqstp->rq_respages;

        if (count) {
                release_pages(rqstp->rq_respages, count);
                for (i = 0; i < count; i++)
                        rqstp->rq_respages[i] = NULL;
        }
}

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
        folio_batch_release(&rqstp->rq_fbatch);
        svc_release_buffer(rqstp);
        if (rqstp->rq_scratch_page)
                put_page(rqstp->rq_scratch_page);
        kfree(rqstp->rq_resp);
        kfree(rqstp->rq_argp);
        kfree(rqstp->rq_auth_data);
        kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);
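/*
 * Example for svc_rqst_replace_page() above (illustrative): a
 * zero-copy READ implementation can substitute page-cache pages for
 * the pre-allocated response pages:
 *
 *      if (!svc_rqst_replace_page(rqstp, pagecache_page))
 *              return -EIO;    (placeholder error handling)
 *
 * The displaced page goes into rq_fbatch and is released in a batch,
 * avoiding a put_page() call per replaced page.
 */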
void
svc_exit_thread(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;

        list_del_rcu(&rqstp->rq_all);

        atomic_dec(&pool->sp_nrthreads);

        spin_lock_bh(&serv->sv_lock);
        serv->sv_nrthreads -= 1;
        spin_unlock_bh(&serv->sv_lock);
        svc_sock_update_bufs(serv);

        svc_rqst_free(rqstp);

        svc_put(serv);
        /* That svc_put() cannot be the last, because the thread
         * waiting for SP_VICTIM_REMAINS to clear must hold
         * a reference. So it is still safe to access pool.
         */
        clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
                                const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in sin = {
                .sin_family             = AF_INET,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(net, program, version,
                                 (const struct sockaddr *)&sin, netid);

        /*
         * User space didn't support rpcbind v4, so retry this
         * registration request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(net, program, version, protocol, port);

        return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
                                const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP6;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP6;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(net, program, version,
                                 (const struct sockaddr *)&sin6, netid);

        /*
         * User space didn't support rpcbind version 4, so we won't
         * use a PF_INET6 listener.
         */
        if (error == -EPROTONOSUPPORT)
                error = -EAFNOSUPPORT;

        return error;
}
#endif  /* IS_ENABLED(CONFIG_IPV6) */
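/*
 * Note the asymmetric fallback above: the "inet" path retries a failed
 * rpcbind v4 SET as a portmap v2 SET, but the "inet6" path cannot,
 * because portmap v2 has no way to represent IPv6 addresses; it
 * converts -EPROTONOSUPPORT to -EAFNOSUPPORT instead.
 */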
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
                          const u32 program, const u32 version,
                          const int family,
                          const unsigned short protocol,
                          const unsigned short port)
{
        int error = -EAFNOSUPPORT;

        switch (family) {
        case PF_INET:
                error = __svc_rpcb_register4(net, program, version,
                                             protocol, port);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                error = __svc_rpcb_register6(net, program, version,
                                             protocol, port);
#endif
        }

        trace_svc_register(progname, version, family, protocol, port, error);
        return error;
}

int svc_rpcbind_set_version(struct net *net,
                            const struct svc_program *progp,
                            u32 version, int family,
                            unsigned short proto,
                            unsigned short port)
{
        return __svc_register(net, progp->pg_name, progp->pg_prog,
                              version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
                            const struct svc_program *progp,
                            u32 version, int family,
                            unsigned short proto,
                            unsigned short port)
{
        const struct svc_version *vers = progp->pg_vers[version];
        int error;

        if (vers == NULL)
                return 0;

        if (vers->vs_hidden) {
                trace_svc_noregister(progp->pg_name, version, proto,
                                     port, family, 0);
                return 0;
        }

        /*
         * Don't register a UDP port if we need congestion
         * control.
         */
        if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
                return 0;

        error = svc_rpcbind_set_version(net, progp, version,
                                        family, proto, port);

        return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
                 const int family, const unsigned short proto,
                 const unsigned short port)
{
        struct svc_program *progp;
        unsigned int i;
        int error = 0;

        WARN_ON_ONCE(proto == 0 && port == 0);
        if (proto == 0 && port == 0)
                return -EINVAL;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        error = progp->pg_rpcbind_set(net, progp, i,
                                                      family, proto, port);
                        if (error < 0) {
                                printk(KERN_WARNING "svc: failed to register "
                                        "%sv%u RPC service (errno %d).\n",
                                        progp->pg_name, i, -error);
                                break;
                        }
                }
        }

        return error;
}
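/*
 * Example (illustrative): advertising NFS (program 100003) version 3
 * over TCP port 2049 results in a call chain like
 *
 *      svc_register(serv, net, PF_INET, IPPROTO_TCP, 2049)
 *        -> progp->pg_rpcbind_set(...)
 *          -> __svc_rpcb_register4(net, 100003, 3, IPPROTO_TCP, 2049)
 *
 * which issues an rpcbind v4 SET for netid "tcp", falling back to a
 * portmap v2 SET when the local rpcbind only speaks v2.
 */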
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version]. If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
                             const char *progname)
{
        int error;

        error = rpcb_v4_register(net, program, version, NULL, "");

        /*
         * User space didn't support rpcbind v4, so retry this
         * request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(net, program, version, 0, 0);

        trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
        struct sighand_struct *sighand;
        struct svc_program *progp;
        unsigned long flags;
        unsigned int i;

        clear_thread_flag(TIF_SIGPENDING);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (progp->pg_vers[i]->vs_hidden)
                                continue;
                        __svc_unregister(net, progp->pg_prog, i, progp->pg_name);
                }
        }

        rcu_read_lock();
        sighand = rcu_dereference(current->sighand);
        spin_lock_irqsave(&sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&sighand->siglock, flags);
        rcu_read_unlock();
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;
        char buf[RPC_MAX_ADDRBUFLEN];

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

        va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
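/*
 * Note on svc_unregister() above: it clears TIF_SIGPENDING so the
 * synchronous rpcbind UNSET calls are not aborted by signals already
 * pending at shutdown (e.g. the signal that stopped the service
 * threads), then recalculates pending state under siglock afterwards
 * so no signal is lost.
 */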
__be32
svc_generic_init_request(struct svc_rqst *rqstp,
                         const struct svc_program *progp,
                         struct svc_process_info *ret)
{
        const struct svc_version *versp = NULL; /* compiler food */
        const struct svc_procedure *procp = NULL;

        if (rqstp->rq_vers >= progp->pg_nvers)
                goto err_bad_vers;
        versp = progp->pg_vers[rqstp->rq_vers];
        if (!versp)
                goto err_bad_vers;

        /*
         * Some protocol versions (namely NFSv4) require some form of
         * congestion control. (See RFC 7530 section 3.1 paragraph 2)
         * In other words, UDP is not allowed. We mark those when setting
         * up the svc_xprt, and verify that here.
         *
         * The spec is not very clear about what error should be returned
         * when someone tries to access a server that is listening on UDP
         * for lower versions. RPC_PROG_MISMATCH seems to be the closest
         * fit.
         */
        if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
            !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
                goto err_bad_vers;

        if (rqstp->rq_proc >= versp->vs_nproc)
                goto err_bad_proc;
        rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
        if (!procp)
                goto err_bad_proc;

        /* Initialize storage for argp and resp */
        memset(rqstp->rq_argp, 0, procp->pc_argzero);
        memset(rqstp->rq_resp, 0, procp->pc_ressize);

        /* Bump per-procedure stats counter */
        this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

        ret->dispatch = versp->vs_dispatch;
        return rpc_success;
err_bad_vers:
        ret->mismatch.lovers = progp->pg_lovers;
        ret->mismatch.hivers = progp->pg_hivers;
        return rpc_prog_mismatch;
err_bad_proc:
        return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);
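/*
 * Example (illustrative): if a program offers only versions 3 and 4
 * (pg_lovers == 3, pg_hivers == 4) and a client asks for version 2,
 * svc_generic_init_request() returns rpc_prog_mismatch and fills
 * ret->mismatch with { .lovers = 3, .hivers = 4 }, which
 * svc_process_common() encodes into the MISMATCH reply body.
 */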
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp)
{
        struct xdr_stream *xdr = &rqstp->rq_res_stream;
        struct svc_program *progp;
        const struct svc_procedure *procp = NULL;
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_process_info process;
        enum svc_auth_status auth_res;
        unsigned int aoffset;
        int rc;
        __be32 *p;

        /* Will be turned off by GSS integrity and privacy services */
        set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
        /* Will be turned off only when NFSv4 Sessions are used */
        set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
        clear_bit(RQ_DROPME, &rqstp->rq_flags);

        /* Construct the first words of the reply: */
        svcxdr_init_encode(rqstp);
        xdr_stream_encode_be32(xdr, rqstp->rq_xid);
        xdr_stream_encode_be32(xdr, rpc_reply);

        p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
        if (unlikely(!p))
                goto err_short_len;
        if (*p++ != cpu_to_be32(RPC_VERSION))
                goto err_bad_rpc;

        xdr_stream_encode_be32(xdr, rpc_msg_accepted);

        rqstp->rq_prog = be32_to_cpup(p++);
        rqstp->rq_vers = be32_to_cpup(p++);
        rqstp->rq_proc = be32_to_cpup(p);

        for (progp = serv->sv_program; progp; progp = progp->pg_next)
                if (rqstp->rq_prog == progp->pg_prog)
                        break;

        /*
         * Decode auth data, and add verifier to reply buffer.
         * We do this before anything else in order to get a decent
         * auth verifier.
         */
        auth_res = svc_authenticate(rqstp);
        /* Also give the program a chance to reject this call: */
        if (auth_res == SVC_OK && progp)
                auth_res = progp->pg_authenticate(rqstp);
        trace_svc_authenticate(rqstp, auth_res);
        switch (auth_res) {
        case SVC_OK:
                break;
        case SVC_GARBAGE:
                goto err_garbage_args;
        case SVC_SYSERR:
                goto err_system_err;
        case SVC_DENIED:
                goto err_bad_auth;
        case SVC_CLOSE:
                goto close;
        case SVC_DROP:
                goto dropit;
        case SVC_COMPLETE:
                goto sendit;
        default:
                pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
                goto err_system_err;
        }

        if (progp == NULL)
                goto err_bad_prog;

        switch (progp->pg_init_request(rqstp, progp, &process)) {
        case rpc_success:
                break;
        case rpc_prog_unavail:
                goto err_bad_prog;
        case rpc_prog_mismatch:
                goto err_bad_vers;
        case rpc_proc_unavail:
                goto err_bad_proc;
        }

        procp = rqstp->rq_procinfo;
        /* Should this check go into the dispatcher? */
        if (!procp || !procp->pc_func)
                goto err_bad_proc;

        /* Syntactic check complete */
        serv->sv_stats->rpccnt++;
        trace_svc_process(rqstp, progp->pg_name);

        aoffset = xdr_stream_pos(xdr);

        /* un-reserve some of the out-queue now that we have a
         * better idea of reply size
         */
        if (procp->pc_xdrressize)
                svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

        /* Call the function that processes the request. */
        rc = process.dispatch(rqstp);
        if (procp->pc_release)
                procp->pc_release(rqstp);
        xdr_finish_decode(xdr);

        if (!rc)
                goto dropit;
        if (rqstp->rq_auth_stat != rpc_auth_ok)
                goto err_bad_auth;

        if (*rqstp->rq_accept_statp != rpc_success)
                xdr_truncate_encode(xdr, aoffset);

        if (procp->pc_encode == NULL)
                goto dropit;

sendit:
        if (svc_authorise(rqstp))
                goto close_xprt;
        return 1;       /* Caller can now send it */

dropit:
        svc_authorise(rqstp);   /* doesn't hurt to call this twice */
        dprintk("svc: svc_process dropit\n");
        return 0;

close:
        svc_authorise(rqstp);
close_xprt:
        if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
                svc_xprt_close(rqstp->rq_xprt);
        dprintk("svc: svc_process close\n");
        return 0;

err_short_len:
        svc_printk(rqstp, "short len %u, dropping request\n",
                   rqstp->rq_arg.len);
        goto close_xprt;

err_bad_rpc:
        serv->sv_stats->rpcbadfmt++;
        xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
        xdr_stream_encode_u32(xdr, RPC_MISMATCH);
        /* Only RPCv2 supported */
        xdr_stream_encode_u32(xdr, RPC_VERSION);
        xdr_stream_encode_u32(xdr, RPC_VERSION);
        return 1;       /* don't wrap */

err_bad_auth:
        dprintk("svc: authentication failed (%d)\n",
                be32_to_cpu(rqstp->rq_auth_stat));
        serv->sv_stats->rpcbadauth++;
        /* Restore write pointer to location of reply status: */
        xdr_truncate_encode(xdr, XDR_UNIT * 2);
        xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
        xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
        xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
        goto sendit;

err_bad_prog:
        dprintk("svc: unknown program %d\n", rqstp->rq_prog);
        serv->sv_stats->rpcbadfmt++;
        *rqstp->rq_accept_statp = rpc_prog_unavail;
        goto sendit;

err_bad_vers:
        svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
                   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

        serv->sv_stats->rpcbadfmt++;
        *rqstp->rq_accept_statp = rpc_prog_mismatch;

        /*
         * svc_authenticate() has already added the verifier and
         * advanced the stream just past rq_accept_statp.
         */
        xdr_stream_encode_u32(xdr, process.mismatch.lovers);
        xdr_stream_encode_u32(xdr, process.mismatch.hivers);
        goto sendit;

err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

        serv->sv_stats->rpcbadfmt++;
        *rqstp->rq_accept_statp = rpc_proc_unavail;
        goto sendit;

err_garbage_args:
        svc_printk(rqstp, "failed to decode RPC header\n");

        serv->sv_stats->rpcbadfmt++;
        *rqstp->rq_accept_statp = rpc_garbage_args;
        goto sendit;

err_system_err:
        serv->sv_stats->rpcbadfmt++;
        *rqstp->rq_accept_statp = rpc_system_err;
        goto sendit;
}
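/*
 * Note on svc_process_common() above: a return of 1 means rq_res holds
 * a complete reply for the caller to transmit (svc_send() in the
 * forward direction, rpc_run_bc_task() for the backchannel); 0 means
 * the request has been dropped and no reply may be sent.
 */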
/**
 * svc_process - Execute one RPC transaction
 * @rqstp: RPC transaction context
 */
void svc_process(struct svc_rqst *rqstp)
{
        struct kvec *resv = &rqstp->rq_res.head[0];
        __be32 *p;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
        if (!fail_sunrpc.ignore_server_disconnect &&
            should_fail(&fail_sunrpc.attr, 1))
                svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

        /*
         * Setup response xdr_buf.
         * Initially it has just one page
         */
        rqstp->rq_next_page = &rqstp->rq_respages[1];
        resv->iov_base = page_address(rqstp->rq_respages[0]);
        resv->iov_len = 0;
        rqstp->rq_res.pages = rqstp->rq_next_page;
        rqstp->rq_res.len = 0;
        rqstp->rq_res.page_base = 0;
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.buflen = PAGE_SIZE;
        rqstp->rq_res.tail[0].iov_base = NULL;
        rqstp->rq_res.tail[0].iov_len = 0;

        svcxdr_init_decode(rqstp);
        p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
        if (unlikely(!p))
                goto out_drop;
        rqstp->rq_xid = *p++;
        if (unlikely(*p != rpc_call))
                goto out_baddir;

        if (!svc_process_common(rqstp))
                goto out_drop;
        svc_send(rqstp);
        return;

out_baddir:
        svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
                   be32_to_cpu(*p));
        rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
        svc_drop(rqstp);
}
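/*
 * Reverse-direction (backchannel) processing below is used when this
 * host is also an RPC client and the peer sends callbacks over the
 * same connection, e.g. NFSv4.1+ CB_* operations arriving on the
 * fore channel transport.
 */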
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * svc_process_bc - process a reverse-direction RPC request
 * @req: RPC request to be used for client-side processing
 * @rqstp: server-side execution context
 */
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
        struct rpc_task *task;
        int proc_error;

        /* Build the svc_rqst used by the common processing routine */
        rqstp->rq_xid = req->rq_xid;
        rqstp->rq_prot = req->rq_xprt->prot;
        rqstp->rq_bc_net = req->rq_xprt->xprt_net;

        rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
        memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
        memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
        memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

        /* Adjust the argument buffer length */
        rqstp->rq_arg.len = req->rq_private_buf.len;
        if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
                rqstp->rq_arg.page_len = 0;
        } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
                        rqstp->rq_arg.page_len)
                rqstp->rq_arg.page_len = rqstp->rq_arg.len -
                        rqstp->rq_arg.head[0].iov_len;
        else
                rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
                        rqstp->rq_arg.page_len;

        /* Reset the response buffer */
        rqstp->rq_res.head[0].iov_len = 0;

        /*
         * Skip the XID and calldir fields because they've already
         * been processed by the caller.
         */
        svcxdr_init_decode(rqstp);
        if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
                return;

        /* Parse and execute the bc call */
        proc_error = svc_process_common(rqstp);

        atomic_dec(&req->rq_xprt->bc_slot_count);
        if (!proc_error) {
                /* Processing error: drop the request */
                xprt_free_bc_request(req);
                return;
        }
        /* Finally, send the reply synchronously */
        memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
        task = rpc_run_bc_task(req);
        if (IS_ERR(task))
                return;

        WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
        rpc_put_task(task);
}
EXPORT_SYMBOL_GPL(svc_process_bc);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 * @rqstp: RPC transaction context
 *
 * Returns the maximum number of payload bytes the current transport
 * allows.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
        u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

        if (rqstp->rq_server->sv_max_payload < max)
                max = rqstp->rq_server->sv_max_payload;
        return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
        if (rqstp && rqstp->rq_procinfo)
                return rqstp->rq_procinfo->pc_name;
        return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
                              unsigned int length)
{
        return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
                                                           length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);
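/*
 * Worked example for svc_fill_write_vector() below (illustrative,
 * 4 KB pages): a 10000-byte payload with 500 bytes in @first yields
 *
 *      vec[0] = 500 (head), vec[1] = 4096, vec[2] = 4096, vec[3] = 1308
 *
 * and a return value of 4.
 */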
1683 */ 1684 i = 0; 1685 if (first->iov_len) { 1686 vec[i].iov_base = first->iov_base; 1687 vec[i].iov_len = min_t(size_t, total, first->iov_len); 1688 total -= vec[i].iov_len; 1689 ++i; 1690 } 1691 1692 while (total) { 1693 vec[i].iov_base = page_address(*pages); 1694 vec[i].iov_len = min_t(size_t, total, PAGE_SIZE); 1695 total -= vec[i].iov_len; 1696 ++i; 1697 ++pages; 1698 } 1699 1700 WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec)); 1701 return i; 1702 } 1703 EXPORT_SYMBOL_GPL(svc_fill_write_vector); 1704 1705 /** 1706 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call 1707 * @rqstp: svc_rqst to operate on 1708 * @first: buffer containing first section of pathname 1709 * @p: buffer containing remaining section of pathname 1710 * @total: total length of the pathname argument 1711 * 1712 * The VFS symlink API demands a NUL-terminated pathname in mapped memory. 1713 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free 1714 * the returned string. 1715 */ 1716 char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, 1717 void *p, size_t total) 1718 { 1719 size_t len, remaining; 1720 char *result, *dst; 1721 1722 result = kmalloc(total + 1, GFP_KERNEL); 1723 if (!result) 1724 return ERR_PTR(-ESERVERFAULT); 1725 1726 dst = result; 1727 remaining = total; 1728 1729 len = min_t(size_t, total, first->iov_len); 1730 if (len) { 1731 memcpy(dst, first->iov_base, len); 1732 dst += len; 1733 remaining -= len; 1734 } 1735 1736 if (remaining) { 1737 len = min_t(size_t, remaining, PAGE_SIZE); 1738 memcpy(dst, p, len); 1739 dst += len; 1740 } 1741 1742 *dst = '\0'; 1743 1744 /* Sanity check: Linux doesn't allow the pathname argument to 1745 * contain a NUL byte. 1746 */ 1747 if (strlen(result) != total) { 1748 kfree(result); 1749 return ERR_PTR(-EINVAL); 1750 } 1751 return result; 1752 } 1753 EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname); 1754