1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/net/sunrpc/svc.c 4 * 5 * High-level RPC service routines 6 * 7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 8 * 9 * Multiple thread pools and NUMAisation 10 * Copyright (c) 2006 Silicon Graphics, Inc. 11 * by Greg Banks <gnb@melbourne.sgi.com> 12 */ 13 14 #include <linux/linkage.h> 15 #include <linux/sched/signal.h> 16 #include <linux/errno.h> 17 #include <linux/net.h> 18 #include <linux/in.h> 19 #include <linux/mm.h> 20 #include <linux/interrupt.h> 21 #include <linux/module.h> 22 #include <linux/kthread.h> 23 #include <linux/slab.h> 24 25 #include <linux/sunrpc/types.h> 26 #include <linux/sunrpc/xdr.h> 27 #include <linux/sunrpc/stats.h> 28 #include <linux/sunrpc/svcsock.h> 29 #include <linux/sunrpc/clnt.h> 30 #include <linux/sunrpc/bc_xprt.h> 31 32 #include <trace/events/sunrpc.h> 33 34 #include "fail.h" 35 36 #define RPCDBG_FACILITY RPCDBG_SVCDSP 37 38 static void svc_unregister(const struct svc_serv *serv, struct net *net); 39 40 #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL 41 42 /* 43 * Mode for mapping cpus to pools. 44 */ 45 enum { 46 SVC_POOL_AUTO = -1, /* choose one of the others */ 47 SVC_POOL_GLOBAL, /* no mapping, just a single global pool 48 * (legacy & UP mode) */ 49 SVC_POOL_PERCPU, /* one pool per cpu */ 50 SVC_POOL_PERNODE /* one pool per numa node */ 51 }; 52 53 /* 54 * Structure for mapping cpus to pools and vice versa. 55 * Setup once during sunrpc initialisation. 56 */ 57 58 struct svc_pool_map { 59 int count; /* How many svc_servs use us */ 60 int mode; /* Note: int not enum to avoid 61 * warnings about "enumeration value 62 * not handled in switch" */ 63 unsigned int npools; 64 unsigned int *pool_to; /* maps pool id to cpu or node */ 65 unsigned int *to_pool; /* maps cpu or node to pool id */ 66 }; 67 68 static struct svc_pool_map svc_pool_map = { 69 .mode = SVC_POOL_DEFAULT 70 }; 71 72 static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ 73 74 static int 75 param_set_pool_mode(const char *val, const struct kernel_param *kp) 76 { 77 int *ip = (int *)kp->arg; 78 struct svc_pool_map *m = &svc_pool_map; 79 int err; 80 81 mutex_lock(&svc_pool_map_mutex); 82 83 err = -EBUSY; 84 if (m->count) 85 goto out; 86 87 err = 0; 88 if (!strncmp(val, "auto", 4)) 89 *ip = SVC_POOL_AUTO; 90 else if (!strncmp(val, "global", 6)) 91 *ip = SVC_POOL_GLOBAL; 92 else if (!strncmp(val, "percpu", 6)) 93 *ip = SVC_POOL_PERCPU; 94 else if (!strncmp(val, "pernode", 7)) 95 *ip = SVC_POOL_PERNODE; 96 else 97 err = -EINVAL; 98 99 out: 100 mutex_unlock(&svc_pool_map_mutex); 101 return err; 102 } 103 104 static int 105 param_get_pool_mode(char *buf, const struct kernel_param *kp) 106 { 107 int *ip = (int *)kp->arg; 108 109 switch (*ip) 110 { 111 case SVC_POOL_AUTO: 112 return sysfs_emit(buf, "auto\n"); 113 case SVC_POOL_GLOBAL: 114 return sysfs_emit(buf, "global\n"); 115 case SVC_POOL_PERCPU: 116 return sysfs_emit(buf, "percpu\n"); 117 case SVC_POOL_PERNODE: 118 return sysfs_emit(buf, "pernode\n"); 119 default: 120 return sysfs_emit(buf, "%d\n", *ip); 121 } 122 } 123 124 module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, 125 &svc_pool_map.mode, 0644); 126 127 /* 128 * Detect best pool mapping mode heuristically, 129 * according to the machine's topology.
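 *
 * The checks below run in priority order: multiple online NUMA nodes
 * select pernode mode, a single node with more than two CPUs selects
 * percpu mode, and anything smaller falls back to one global pool.
 * The heuristic can be pre-empted from user space, e.g.:
 *
 *   echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * but only before the first pooled service starts, since
 * param_set_pool_mode() returns -EBUSY once the map has users.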
130 */ 131 static int 132 svc_pool_map_choose_mode(void) 133 { 134 unsigned int node; 135 136 if (nr_online_nodes > 1) { 137 /* 138 * Actually have multiple NUMA nodes, 139 * so split pools on NUMA node boundaries 140 */ 141 return SVC_POOL_PERNODE; 142 } 143 144 node = first_online_node; 145 if (nr_cpus_node(node) > 2) { 146 /* 147 * Non-trivial SMP, or CONFIG_NUMA on 148 * non-NUMA hardware, e.g. with a generic 149 * x86_64 kernel on Xeons. In this case we 150 * want to divide the pools on cpu boundaries. 151 */ 152 return SVC_POOL_PERCPU; 153 } 154 155 /* default: one global pool */ 156 return SVC_POOL_GLOBAL; 157 } 158 159 /* 160 * Allocate the to_pool[] and pool_to[] arrays. 161 * Returns 0 on success or an errno. 162 */ 163 static int 164 svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools) 165 { 166 m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); 167 if (!m->to_pool) 168 goto fail; 169 m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); 170 if (!m->pool_to) 171 goto fail_free; 172 173 return 0; 174 175 fail_free: 176 kfree(m->to_pool); 177 m->to_pool = NULL; 178 fail: 179 return -ENOMEM; 180 } 181 182 /* 183 * Initialise the pool map for SVC_POOL_PERCPU mode. 184 * Returns number of pools or <0 on error. 185 */ 186 static int 187 svc_pool_map_init_percpu(struct svc_pool_map *m) 188 { 189 unsigned int maxpools = nr_cpu_ids; 190 unsigned int pidx = 0; 191 unsigned int cpu; 192 int err; 193 194 err = svc_pool_map_alloc_arrays(m, maxpools); 195 if (err) 196 return err; 197 198 for_each_online_cpu(cpu) { 199 BUG_ON(pidx >= maxpools); 200 m->to_pool[cpu] = pidx; 201 m->pool_to[pidx] = cpu; 202 pidx++; 203 } 204 /* cpus brought online later all get mapped to pool0, sorry */ 205 206 return pidx; 207 } 208 209 210 /* 211 * Initialise the pool map for SVC_POOL_PERNODE mode. 212 * Returns number of pools or <0 on error. 213 */ 214 static int 215 svc_pool_map_init_pernode(struct svc_pool_map *m) 216 { 217 unsigned int maxpools = nr_node_ids; 218 unsigned int pidx = 0; 219 unsigned int node; 220 int err; 221 222 err = svc_pool_map_alloc_arrays(m, maxpools); 223 if (err) 224 return err; 225 226 for_each_node_with_cpus(node) { 227 /* some architectures (e.g. SN2) have cpuless nodes */ 228 BUG_ON(pidx > maxpools); 229 m->to_pool[node] = pidx; 230 m->pool_to[pidx] = node; 231 pidx++; 232 } 233 /* nodes brought online later all get mapped to pool0, sorry */ 234 235 return pidx; 236 } 237 238 239 /* 240 * Add a reference to the global map of cpus to pools (and 241 * vice versa) if pools are in use. 242 * Initialise the map if we're the first user. 243 * Returns the number of pools. If this is '1', no reference 244 * was taken.
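 *
 * First use also resolves SVC_POOL_AUTO to a concrete mode; if the
 * per-cpu or per-node initialiser fails (or the mode is global),
 * the map quietly degrades to a single unpooled instance.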
245 */ 246 static unsigned int 247 svc_pool_map_get(void) 248 { 249 struct svc_pool_map *m = &svc_pool_map; 250 int npools = -1; 251 252 mutex_lock(&svc_pool_map_mutex); 253 254 if (m->count++) { 255 mutex_unlock(&svc_pool_map_mutex); 256 WARN_ON_ONCE(m->npools <= 1); 257 return m->npools; 258 } 259 260 if (m->mode == SVC_POOL_AUTO) 261 m->mode = svc_pool_map_choose_mode(); 262 263 switch (m->mode) { 264 case SVC_POOL_PERCPU: 265 npools = svc_pool_map_init_percpu(m); 266 break; 267 case SVC_POOL_PERNODE: 268 npools = svc_pool_map_init_pernode(m); 269 break; 270 } 271 272 if (npools <= 0) { 273 /* default, or memory allocation failure */ 274 npools = 1; 275 m->mode = SVC_POOL_GLOBAL; 276 } 277 m->npools = npools; 278 279 if (npools == 1) 280 /* service is unpooled, so doesn't hold a reference */ 281 m->count--; 282 283 mutex_unlock(&svc_pool_map_mutex); 284 return npools; 285 } 286 287 /* 288 * Drop a reference to the global map of cpus to pools, if 289 * pools were in use, i.e. if npools > 1. 290 * When the last reference is dropped, the map data is 291 * freed; this allows the sysadmin to change the pool 292 * mode using the pool_mode module option without 293 * rebooting or re-loading sunrpc.ko. 294 */ 295 static void 296 svc_pool_map_put(int npools) 297 { 298 struct svc_pool_map *m = &svc_pool_map; 299 300 if (npools <= 1) 301 return; 302 mutex_lock(&svc_pool_map_mutex); 303 304 if (!--m->count) { 305 kfree(m->to_pool); 306 m->to_pool = NULL; 307 kfree(m->pool_to); 308 m->pool_to = NULL; 309 m->npools = 0; 310 } 311 312 mutex_unlock(&svc_pool_map_mutex); 313 } 314 315 static int svc_pool_map_get_node(unsigned int pidx) 316 { 317 const struct svc_pool_map *m = &svc_pool_map; 318 319 if (m->count) { 320 if (m->mode == SVC_POOL_PERCPU) 321 return cpu_to_node(m->pool_to[pidx]); 322 if (m->mode == SVC_POOL_PERNODE) 323 return m->pool_to[pidx]; 324 } 325 return NUMA_NO_NODE; 326 } 327 /* 328 * Set the given thread's cpus_allowed mask so that it 329 * will only run on cpus in the given pool. 330 */ 331 static inline void 332 svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) 333 { 334 struct svc_pool_map *m = &svc_pool_map; 335 unsigned int node = m->pool_to[pidx]; 336 337 /* 338 * The caller checks for sv_nrpools > 1, which 339 * implies that we've been initialized. 340 */ 341 WARN_ON_ONCE(m->count == 0); 342 if (m->count == 0) 343 return; 344 345 switch (m->mode) { 346 case SVC_POOL_PERCPU: 347 { 348 set_cpus_allowed_ptr(task, cpumask_of(node)); 349 break; 350 } 351 case SVC_POOL_PERNODE: 352 { 353 set_cpus_allowed_ptr(task, cpumask_of_node(node)); 354 break; 355 } 356 } 357 } 358 359 /** 360 * svc_pool_for_cpu - Select pool to run a thread on this cpu 361 * @serv: An RPC service 362 * 363 * Use the active CPU and the svc_pool_map's mode setting to 364 * select the svc thread pool to use. Once initialized, the 365 * svc_pool_map does not change. 
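 *
 * The modulo in the return statement below is defensive: to_pool[]
 * entries default to zero for CPUs or nodes that come online after
 * the map is built, and pidx is clamped in case the map describes
 * more pools than @serv actually allocated.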
366 * 367 * Return value: 368 * A pointer to an svc_pool 369 */ 370 struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv) 371 { 372 struct svc_pool_map *m = &svc_pool_map; 373 int cpu = raw_smp_processor_id(); 374 unsigned int pidx = 0; 375 376 if (serv->sv_nrpools <= 1) 377 return serv->sv_pools; 378 379 switch (m->mode) { 380 case SVC_POOL_PERCPU: 381 pidx = m->to_pool[cpu]; 382 break; 383 case SVC_POOL_PERNODE: 384 pidx = m->to_pool[cpu_to_node(cpu)]; 385 break; 386 } 387 388 return &serv->sv_pools[pidx % serv->sv_nrpools]; 389 } 390 391 int svc_rpcb_setup(struct svc_serv *serv, struct net *net) 392 { 393 int err; 394 395 err = rpcb_create_local(net); 396 if (err) 397 return err; 398 399 /* Remove any stale portmap registrations */ 400 svc_unregister(serv, net); 401 return 0; 402 } 403 EXPORT_SYMBOL_GPL(svc_rpcb_setup); 404 405 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net) 406 { 407 svc_unregister(serv, net); 408 rpcb_put_local(net); 409 } 410 EXPORT_SYMBOL_GPL(svc_rpcb_cleanup); 411 412 static int svc_uses_rpcbind(struct svc_serv *serv) 413 { 414 struct svc_program *progp; 415 unsigned int i; 416 417 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 418 for (i = 0; i < progp->pg_nvers; i++) { 419 if (progp->pg_vers[i] == NULL) 420 continue; 421 if (!progp->pg_vers[i]->vs_hidden) 422 return 1; 423 } 424 } 425 426 return 0; 427 } 428 429 int svc_bind(struct svc_serv *serv, struct net *net) 430 { 431 if (!svc_uses_rpcbind(serv)) 432 return 0; 433 return svc_rpcb_setup(serv, net); 434 } 435 EXPORT_SYMBOL_GPL(svc_bind); 436 437 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 438 static void 439 __svc_init_bc(struct svc_serv *serv) 440 { 441 lwq_init(&serv->sv_cb_list); 442 } 443 #else 444 static void 445 __svc_init_bc(struct svc_serv *serv) 446 { 447 } 448 #endif 449 450 /* 451 * Create an RPC service 452 */ 453 static struct svc_serv * 454 __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, 455 int (*threadfn)(void *data)) 456 { 457 struct svc_serv *serv; 458 unsigned int vers; 459 unsigned int xdrsize; 460 unsigned int i; 461 462 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) 463 return NULL; 464 serv->sv_name = prog->pg_name; 465 serv->sv_program = prog; 466 serv->sv_stats = prog->pg_stats; 467 if (bufsize > RPCSVC_MAXPAYLOAD) 468 bufsize = RPCSVC_MAXPAYLOAD; 469 serv->sv_max_payload = bufsize? 
bufsize : 4096; 470 serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); 471 serv->sv_threadfn = threadfn; 472 xdrsize = 0; 473 while (prog) { 474 prog->pg_lovers = prog->pg_nvers-1; 475 for (vers=0; vers<prog->pg_nvers ; vers++) 476 if (prog->pg_vers[vers]) { 477 prog->pg_hivers = vers; 478 if (prog->pg_lovers > vers) 479 prog->pg_lovers = vers; 480 if (prog->pg_vers[vers]->vs_xdrsize > xdrsize) 481 xdrsize = prog->pg_vers[vers]->vs_xdrsize; 482 } 483 prog = prog->pg_next; 484 } 485 serv->sv_xdrsize = xdrsize; 486 INIT_LIST_HEAD(&serv->sv_tempsocks); 487 INIT_LIST_HEAD(&serv->sv_permsocks); 488 timer_setup(&serv->sv_temptimer, NULL, 0); 489 spin_lock_init(&serv->sv_lock); 490 491 __svc_init_bc(serv); 492 493 serv->sv_nrpools = npools; 494 serv->sv_pools = 495 kcalloc(serv->sv_nrpools, sizeof(struct svc_pool), 496 GFP_KERNEL); 497 if (!serv->sv_pools) { 498 kfree(serv); 499 return NULL; 500 } 501 502 for (i = 0; i < serv->sv_nrpools; i++) { 503 struct svc_pool *pool = &serv->sv_pools[i]; 504 505 dprintk("svc: initialising pool %u for %s\n", 506 i, serv->sv_name); 507 508 pool->sp_id = i; 509 lwq_init(&pool->sp_xprts); 510 INIT_LIST_HEAD(&pool->sp_all_threads); 511 init_llist_head(&pool->sp_idle_threads); 512 513 percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); 514 percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); 515 percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); 516 } 517 518 return serv; 519 } 520 521 /** 522 * svc_create - Create an RPC service 523 * @prog: the RPC program the new service will handle 524 * @bufsize: maximum message size for @prog 525 * @threadfn: a function to service RPC requests for @prog 526 * 527 * Returns an instantiated struct svc_serv object or NULL. 528 */ 529 struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, 530 int (*threadfn)(void *data)) 531 { 532 return __svc_create(prog, bufsize, 1, threadfn); 533 } 534 EXPORT_SYMBOL_GPL(svc_create); 535 536 /** 537 * svc_create_pooled - Create an RPC service with pooled threads 538 * @prog: the RPC program the new service will handle 539 * @bufsize: maximum message size for @prog 540 * @threadfn: a function to service RPC requests for @prog 541 * 542 * Returns an instantiated struct svc_serv object or NULL. 543 */ 544 struct svc_serv *svc_create_pooled(struct svc_program *prog, 545 unsigned int bufsize, 546 int (*threadfn)(void *data)) 547 { 548 struct svc_serv *serv; 549 unsigned int npools = svc_pool_map_get(); 550 551 serv = __svc_create(prog, bufsize, npools, threadfn); 552 if (!serv) 553 goto out_err; 554 return serv; 555 out_err: 556 svc_pool_map_put(npools); 557 return NULL; 558 } 559 EXPORT_SYMBOL_GPL(svc_create_pooled); 560 561 /* 562 * Destroy an RPC service. Should be called with appropriate locking to 563 * protect sv_permsocks and sv_tempsocks. 564 */ 565 void 566 svc_destroy(struct svc_serv **servp) 567 { 568 struct svc_serv *serv = *servp; 569 unsigned int i; 570 571 *servp = NULL; 572 573 dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name); 574 timer_shutdown_sync(&serv->sv_temptimer); 575 576 /* 577 * Remaining transports at this point are not expected. 
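 * The service owner is expected to have torn all transports down
 * (typically via svc_xprt_destroy_all()) before dropping its last
 * reference; the WARN_ONCEs below catch leaks.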
578 */ 579 WARN_ONCE(!list_empty(&serv->sv_permsocks), 580 "SVC: permsocks remain for %s\n", serv->sv_program->pg_name); 581 WARN_ONCE(!list_empty(&serv->sv_tempsocks), 582 "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name); 583 584 cache_clean_deferred(serv); 585 586 svc_pool_map_put(serv->sv_nrpools); 587 588 for (i = 0; i < serv->sv_nrpools; i++) { 589 struct svc_pool *pool = &serv->sv_pools[i]; 590 591 percpu_counter_destroy(&pool->sp_messages_arrived); 592 percpu_counter_destroy(&pool->sp_sockets_queued); 593 percpu_counter_destroy(&pool->sp_threads_woken); 594 } 595 kfree(serv->sv_pools); 596 kfree(serv); 597 } 598 EXPORT_SYMBOL_GPL(svc_destroy); 599 600 static bool 601 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) 602 { 603 unsigned long pages, ret; 604 605 /* bc_xprt uses fore channel allocated buffers */ 606 if (svc_is_backchannel(rqstp)) 607 return true; 608 609 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 610 * We assume one is at most one page 611 */ 612 WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); 613 if (pages > RPCSVC_MAXPAGES) 614 pages = RPCSVC_MAXPAGES; 615 616 ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages, 617 rqstp->rq_pages); 618 return ret == pages; 619 } 620 621 /* 622 * Release an RPC server buffer 623 */ 624 static void 625 svc_release_buffer(struct svc_rqst *rqstp) 626 { 627 unsigned int i; 628 629 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++) 630 if (rqstp->rq_pages[i]) 631 put_page(rqstp->rq_pages[i]); 632 } 633 634 struct svc_rqst * 635 svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node) 636 { 637 struct svc_rqst *rqstp; 638 639 rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); 640 if (!rqstp) 641 return rqstp; 642 643 folio_batch_init(&rqstp->rq_fbatch); 644 645 rqstp->rq_server = serv; 646 rqstp->rq_pool = pool; 647 648 rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0); 649 if (!rqstp->rq_scratch_page) 650 goto out_enomem; 651 652 rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); 653 if (!rqstp->rq_argp) 654 goto out_enomem; 655 656 rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); 657 if (!rqstp->rq_resp) 658 goto out_enomem; 659 660 if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node)) 661 goto out_enomem; 662 663 return rqstp; 664 out_enomem: 665 svc_rqst_free(rqstp); 666 return NULL; 667 } 668 EXPORT_SYMBOL_GPL(svc_rqst_alloc); 669 670 static struct svc_rqst * 671 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) 672 { 673 struct svc_rqst *rqstp; 674 675 rqstp = svc_rqst_alloc(serv, pool, node); 676 if (!rqstp) 677 return ERR_PTR(-ENOMEM); 678 679 spin_lock_bh(&serv->sv_lock); 680 serv->sv_nrthreads += 1; 681 spin_unlock_bh(&serv->sv_lock); 682 683 atomic_inc(&pool->sp_nrthreads); 684 685 /* Protected by whatever lock the service uses when calling 686 * svc_set_num_threads() 687 */ 688 list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); 689 690 return rqstp; 691 } 692 693 /** 694 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool 695 * @pool: service thread pool 696 * 697 * Can be called from soft IRQ or process context. Finding an idle 698 * service thread and marking it BUSY is atomic with respect to 699 * other calls to svc_pool_wake_idle_thread(). 
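 * If no thread is idle, nothing is woken: the work remains queued
 * on the pool until a busy thread finishes its current request and
 * picks it up.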
700 * 701 */ 702 void svc_pool_wake_idle_thread(struct svc_pool *pool) 703 { 704 struct svc_rqst *rqstp; 705 struct llist_node *ln; 706 707 rcu_read_lock(); 708 ln = READ_ONCE(pool->sp_idle_threads.first); 709 if (ln) { 710 rqstp = llist_entry(ln, struct svc_rqst, rq_idle); 711 WRITE_ONCE(rqstp->rq_qtime, ktime_get()); 712 if (!task_is_running(rqstp->rq_task)) { 713 wake_up_process(rqstp->rq_task); 714 trace_svc_wake_up(rqstp->rq_task->pid); 715 percpu_counter_inc(&pool->sp_threads_woken); 716 } 717 rcu_read_unlock(); 718 return; 719 } 720 rcu_read_unlock(); 721 722 } 723 EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread); 724 725 static struct svc_pool * 726 svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) 727 { 728 return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; 729 } 730 731 static struct svc_pool * 732 svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool, 733 unsigned int *state) 734 { 735 struct svc_pool *pool; 736 unsigned int i; 737 738 retry: 739 pool = target_pool; 740 741 if (pool != NULL) { 742 if (atomic_inc_not_zero(&pool->sp_nrthreads)) 743 goto found_pool; 744 return NULL; 745 } else { 746 for (i = 0; i < serv->sv_nrpools; i++) { 747 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; 748 if (atomic_inc_not_zero(&pool->sp_nrthreads)) 749 goto found_pool; 750 } 751 return NULL; 752 } 753 754 found_pool: 755 set_bit(SP_VICTIM_REMAINS, &pool->sp_flags); 756 set_bit(SP_NEED_VICTIM, &pool->sp_flags); 757 if (!atomic_dec_and_test(&pool->sp_nrthreads)) 758 return pool; 759 /* Nothing left in this pool any more */ 760 clear_bit(SP_NEED_VICTIM, &pool->sp_flags); 761 clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags); 762 goto retry; 763 } 764 765 static int 766 svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) 767 { 768 struct svc_rqst *rqstp; 769 struct task_struct *task; 770 struct svc_pool *chosen_pool; 771 unsigned int state = serv->sv_nrthreads-1; 772 int node; 773 774 do { 775 nrservs--; 776 chosen_pool = svc_pool_next(serv, pool, &state); 777 node = svc_pool_map_get_node(chosen_pool->sp_id); 778 779 rqstp = svc_prepare_thread(serv, chosen_pool, node); 780 if (IS_ERR(rqstp)) 781 return PTR_ERR(rqstp); 782 task = kthread_create_on_node(serv->sv_threadfn, rqstp, 783 node, "%s", serv->sv_name); 784 if (IS_ERR(task)) { 785 svc_exit_thread(rqstp); 786 return PTR_ERR(task); 787 } 788 789 rqstp->rq_task = task; 790 if (serv->sv_nrpools > 1) 791 svc_pool_map_set_cpumask(task, chosen_pool->sp_id); 792 793 svc_sock_update_bufs(serv); 794 wake_up_process(task); 795 } while (nrservs > 0); 796 797 return 0; 798 } 799 800 static int 801 svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) 802 { 803 unsigned int state = serv->sv_nrthreads-1; 804 struct svc_pool *victim; 805 806 do { 807 victim = svc_pool_victim(serv, pool, &state); 808 if (!victim) 809 break; 810 svc_pool_wake_idle_thread(victim); 811 wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS, 812 TASK_IDLE); 813 nrservs++; 814 } while (nrservs < 0); 815 return 0; 816 } 817 818 /** 819 * svc_set_num_threads - adjust number of threads per RPC service 820 * @serv: RPC service to adjust 821 * @pool: Specific pool from which to choose threads, or NULL 822 * @nrservs: New number of threads for @serv (0 or less means kill all threads) 823 * 824 * Create or destroy threads to make the number of threads for @serv the 825 * given number. 
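 * Because @nrservs is an absolute target rather than a delta,
 * repeating a call with the same value is a no-op.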
If @pool is non-NULL, change only threads in that pool; 826 * otherwise, round-robin between all pools for @serv. @serv's 827 * sv_nrthreads is adjusted for each thread created or destroyed. 828 * 829 * Caller must ensure mutual exclusion between this and server startup or 830 * shutdown. 831 * 832 * Returns zero on success or a negative errno if an error occurred while 833 * starting a thread. 834 */ 835 int 836 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) 837 { 838 if (!pool) 839 nrservs -= serv->sv_nrthreads; 840 else 841 nrservs -= atomic_read(&pool->sp_nrthreads); 842 843 if (nrservs > 0) 844 return svc_start_kthreads(serv, pool, nrservs); 845 if (nrservs < 0) 846 return svc_stop_kthreads(serv, pool, nrservs); 847 return 0; 848 } 849 EXPORT_SYMBOL_GPL(svc_set_num_threads); 850 851 /** 852 * svc_rqst_replace_page - Replace one page in rq_pages[] 853 * @rqstp: svc_rqst with pages to replace 854 * @page: replacement page 855 * 856 * When replacing a page in rq_pages, batch the release of the 857 * replaced pages to avoid hammering the page allocator. 858 * 859 * Return values: 860 * %true: page replaced 861 * %false: array bounds checking failed 862 */ 863 bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page) 864 { 865 struct page **begin = rqstp->rq_pages; 866 struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES]; 867 868 if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) { 869 trace_svc_replace_page_err(rqstp); 870 return false; 871 } 872 873 if (*rqstp->rq_next_page) { 874 if (!folio_batch_add(&rqstp->rq_fbatch, 875 page_folio(*rqstp->rq_next_page))) 876 __folio_batch_release(&rqstp->rq_fbatch); 877 } 878 879 get_page(page); 880 *(rqstp->rq_next_page++) = page; 881 return true; 882 } 883 EXPORT_SYMBOL_GPL(svc_rqst_replace_page); 884 885 /** 886 * svc_rqst_release_pages - Release Reply buffer pages 887 * @rqstp: RPC transaction context 888 * 889 * Release response pages that might still be in flight after 890 * svc_send, and any spliced filesystem-owned pages. 891 */ 892 void svc_rqst_release_pages(struct svc_rqst *rqstp) 893 { 894 int i, count = rqstp->rq_next_page - rqstp->rq_respages; 895 896 if (count) { 897 release_pages(rqstp->rq_respages, count); 898 for (i = 0; i < count; i++) 899 rqstp->rq_respages[i] = NULL; 900 } 901 } 902 903 /* 904 * Called from a server thread as it's exiting. Caller must hold the "service 905 * mutex" for the service. 906 */ 907 void 908 svc_rqst_free(struct svc_rqst *rqstp) 909 { 910 folio_batch_release(&rqstp->rq_fbatch); 911 svc_release_buffer(rqstp); 912 if (rqstp->rq_scratch_page) 913 put_page(rqstp->rq_scratch_page); 914 kfree(rqstp->rq_resp); 915 kfree(rqstp->rq_argp); 916 kfree(rqstp->rq_auth_data); 917 kfree_rcu(rqstp, rq_rcu_head); 918 } 919 EXPORT_SYMBOL_GPL(svc_rqst_free); 920 921 void 922 svc_exit_thread(struct svc_rqst *rqstp) 923 { 924 struct svc_serv *serv = rqstp->rq_server; 925 struct svc_pool *pool = rqstp->rq_pool; 926 927 list_del_rcu(&rqstp->rq_all); 928 929 atomic_dec(&pool->sp_nrthreads); 930 931 spin_lock_bh(&serv->sv_lock); 932 serv->sv_nrthreads -= 1; 933 spin_unlock_bh(&serv->sv_lock); 934 svc_sock_update_bufs(serv); 935 936 svc_rqst_free(rqstp); 937 938 clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags); 939 } 940 EXPORT_SYMBOL_GPL(svc_exit_thread); 941 942 /* 943 * Register an "inet" protocol family netid with the local 944 * rpcbind daemon via an rpcbind v4 SET request. 
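 * A v2 PMAP_SET is used as a transparent fallback when the local
 * rpcbind daemon does not speak rpcbind version 4.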
945 * 946 * No netconfig infrastructure is available in the kernel, so 947 * we map IP_ protocol numbers to netids by hand. 948 * 949 * Returns zero on success; a negative errno value is returned 950 * if any error occurs. 951 */ 952 static int __svc_rpcb_register4(struct net *net, const u32 program, 953 const u32 version, 954 const unsigned short protocol, 955 const unsigned short port) 956 { 957 const struct sockaddr_in sin = { 958 .sin_family = AF_INET, 959 .sin_addr.s_addr = htonl(INADDR_ANY), 960 .sin_port = htons(port), 961 }; 962 const char *netid; 963 int error; 964 965 switch (protocol) { 966 case IPPROTO_UDP: 967 netid = RPCBIND_NETID_UDP; 968 break; 969 case IPPROTO_TCP: 970 netid = RPCBIND_NETID_TCP; 971 break; 972 default: 973 return -ENOPROTOOPT; 974 } 975 976 error = rpcb_v4_register(net, program, version, 977 (const struct sockaddr *)&sin, netid); 978 979 /* 980 * User space didn't support rpcbind v4, so retry this 981 * registration request with the legacy rpcbind v2 protocol. 982 */ 983 if (error == -EPROTONOSUPPORT) 984 error = rpcb_register(net, program, version, protocol, port); 985 986 return error; 987 } 988 989 #if IS_ENABLED(CONFIG_IPV6) 990 /* 991 * Register an "inet6" protocol family netid with the local 992 * rpcbind daemon via an rpcbind v4 SET request. 993 * 994 * No netconfig infrastructure is available in the kernel, so 995 * we map IP_ protocol numbers to netids by hand. 996 * 997 * Returns zero on success; a negative errno value is returned 998 * if any error occurs. 999 */ 1000 static int __svc_rpcb_register6(struct net *net, const u32 program, 1001 const u32 version, 1002 const unsigned short protocol, 1003 const unsigned short port) 1004 { 1005 const struct sockaddr_in6 sin6 = { 1006 .sin6_family = AF_INET6, 1007 .sin6_addr = IN6ADDR_ANY_INIT, 1008 .sin6_port = htons(port), 1009 }; 1010 const char *netid; 1011 int error; 1012 1013 switch (protocol) { 1014 case IPPROTO_UDP: 1015 netid = RPCBIND_NETID_UDP6; 1016 break; 1017 case IPPROTO_TCP: 1018 netid = RPCBIND_NETID_TCP6; 1019 break; 1020 default: 1021 return -ENOPROTOOPT; 1022 } 1023 1024 error = rpcb_v4_register(net, program, version, 1025 (const struct sockaddr *)&sin6, netid); 1026 1027 /* 1028 * User space didn't support rpcbind version 4, so we won't 1029 * use a PF_INET6 listener. 1030 */ 1031 if (error == -EPROTONOSUPPORT) 1032 error = -EAFNOSUPPORT; 1033 1034 return error; 1035 } 1036 #endif /* IS_ENABLED(CONFIG_IPV6) */ 1037 1038 /* 1039 * Register a kernel RPC service via rpcbind version 4. 1040 * 1041 * Returns zero on success; a negative errno value is returned 1042 * if any error occurs. 
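 *
 * Dispatches on @family: PF_INET and (when IPv6 is enabled) PF_INET6
 * are supported; any other family fails with -EAFNOSUPPORT.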
1043 */ 1044 static int __svc_register(struct net *net, const char *progname, 1045 const u32 program, const u32 version, 1046 const int family, 1047 const unsigned short protocol, 1048 const unsigned short port) 1049 { 1050 int error = -EAFNOSUPPORT; 1051 1052 switch (family) { 1053 case PF_INET: 1054 error = __svc_rpcb_register4(net, program, version, 1055 protocol, port); 1056 break; 1057 #if IS_ENABLED(CONFIG_IPV6) 1058 case PF_INET6: 1059 error = __svc_rpcb_register6(net, program, version, 1060 protocol, port); 1061 #endif 1062 } 1063 1064 trace_svc_register(progname, version, family, protocol, port, error); 1065 return error; 1066 } 1067 1068 int svc_rpcbind_set_version(struct net *net, 1069 const struct svc_program *progp, 1070 u32 version, int family, 1071 unsigned short proto, 1072 unsigned short port) 1073 { 1074 return __svc_register(net, progp->pg_name, progp->pg_prog, 1075 version, family, proto, port); 1076 1077 } 1078 EXPORT_SYMBOL_GPL(svc_rpcbind_set_version); 1079 1080 int svc_generic_rpcbind_set(struct net *net, 1081 const struct svc_program *progp, 1082 u32 version, int family, 1083 unsigned short proto, 1084 unsigned short port) 1085 { 1086 const struct svc_version *vers = progp->pg_vers[version]; 1087 int error; 1088 1089 if (vers == NULL) 1090 return 0; 1091 1092 if (vers->vs_hidden) { 1093 trace_svc_noregister(progp->pg_name, version, proto, 1094 port, family, 0); 1095 return 0; 1096 } 1097 1098 /* 1099 * Don't register a UDP port if we need congestion 1100 * control. 1101 */ 1102 if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP) 1103 return 0; 1104 1105 error = svc_rpcbind_set_version(net, progp, version, 1106 family, proto, port); 1107 1108 return (vers->vs_rpcb_optnl) ? 0 : error; 1109 } 1110 EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set); 1111 1112 /** 1113 * svc_register - register an RPC service with the local portmapper 1114 * @serv: svc_serv struct for the service to register 1115 * @net: net namespace for the service to register 1116 * @family: protocol family of service's listener socket 1117 * @proto: transport protocol number to advertise 1118 * @port: port to advertise 1119 * 1120 * Service is registered for any address in the passed-in protocol family 1121 */ 1122 int svc_register(const struct svc_serv *serv, struct net *net, 1123 const int family, const unsigned short proto, 1124 const unsigned short port) 1125 { 1126 struct svc_program *progp; 1127 unsigned int i; 1128 int error = 0; 1129 1130 WARN_ON_ONCE(proto == 0 && port == 0); 1131 if (proto == 0 && port == 0) 1132 return -EINVAL; 1133 1134 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 1135 for (i = 0; i < progp->pg_nvers; i++) { 1136 1137 error = progp->pg_rpcbind_set(net, progp, i, 1138 family, proto, port); 1139 if (error < 0) { 1140 printk(KERN_WARNING "svc: failed to register " 1141 "%sv%u RPC service (errno %d).\n", 1142 progp->pg_name, i, -error); 1143 break; 1144 } 1145 } 1146 } 1147 1148 return error; 1149 } 1150 1151 /* 1152 * If user space is running rpcbind, it should take the v4 UNSET 1153 * and clear everything for this [program, version]. If user space 1154 * is running portmap, it will reject the v4 UNSET, but won't have 1155 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient 1156 * in this case to clear all existing entries for [program, version]. 
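 *
 * Either way, a single UNSET round trip per [program, version] is
 * enough, so __svc_unregister() below never loops over netids.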
1157 */ 1158 static void __svc_unregister(struct net *net, const u32 program, const u32 version, 1159 const char *progname) 1160 { 1161 int error; 1162 1163 error = rpcb_v4_register(net, program, version, NULL, ""); 1164 1165 /* 1166 * User space didn't support rpcbind v4, so retry this 1167 * request with the legacy rpcbind v2 protocol. 1168 */ 1169 if (error == -EPROTONOSUPPORT) 1170 error = rpcb_register(net, program, version, 0, 0); 1171 1172 trace_svc_unregister(progname, version, error); 1173 } 1174 1175 /* 1176 * All netids, bind addresses and ports registered for [program, version] 1177 * are removed from the local rpcbind database (if the service is not 1178 * hidden) to make way for a new instance of the service. 1179 * 1180 * The result of unregistration is reported via dprintk for those who want 1181 * verification of the result, but is otherwise not important. 1182 */ 1183 static void svc_unregister(const struct svc_serv *serv, struct net *net) 1184 { 1185 struct sighand_struct *sighand; 1186 struct svc_program *progp; 1187 unsigned long flags; 1188 unsigned int i; 1189 1190 clear_thread_flag(TIF_SIGPENDING); 1191 1192 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 1193 for (i = 0; i < progp->pg_nvers; i++) { 1194 if (progp->pg_vers[i] == NULL) 1195 continue; 1196 if (progp->pg_vers[i]->vs_hidden) 1197 continue; 1198 __svc_unregister(net, progp->pg_prog, i, progp->pg_name); 1199 } 1200 } 1201 1202 rcu_read_lock(); 1203 sighand = rcu_dereference(current->sighand); 1204 spin_lock_irqsave(&sighand->siglock, flags); 1205 recalc_sigpending(); 1206 spin_unlock_irqrestore(&sighand->siglock, flags); 1207 rcu_read_unlock(); 1208 } 1209 1210 /* 1211 * dprintk the given error with the address of the client that caused it. 1212 */ 1213 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 1214 static __printf(2, 3) 1215 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 1216 { 1217 struct va_format vaf; 1218 va_list args; 1219 char buf[RPC_MAX_ADDRBUFLEN]; 1220 1221 va_start(args, fmt); 1222 1223 vaf.fmt = fmt; 1224 vaf.va = &args; 1225 1226 dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); 1227 1228 va_end(args); 1229 } 1230 #else 1231 static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} 1232 #endif 1233 1234 __be32 1235 svc_generic_init_request(struct svc_rqst *rqstp, 1236 const struct svc_program *progp, 1237 struct svc_process_info *ret) 1238 { 1239 const struct svc_version *versp = NULL; /* compiler food */ 1240 const struct svc_procedure *procp = NULL; 1241 1242 if (rqstp->rq_vers >= progp->pg_nvers ) 1243 goto err_bad_vers; 1244 versp = progp->pg_vers[rqstp->rq_vers]; 1245 if (!versp) 1246 goto err_bad_vers; 1247 1248 /* 1249 * Some protocol versions (namely NFSv4) require some form of 1250 * congestion control. (See RFC 7530 section 3.1 paragraph 2) 1251 * In other words, UDP is not allowed. We mark those when setting 1252 * up the svc_xprt, and verify that here. 1253 * 1254 * The spec is not very clear about what error should be returned 1255 * when someone tries to access a server that is listening on UDP 1256 * for lower versions. RPC_PROG_MISMATCH seems to be the closest 1257 * fit. 
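 * It also has the advantage of carrying the server's supported
 * version range back to the client in the mismatch reply.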
1258 */ 1259 if (versp->vs_need_cong_ctrl && rqstp->rq_xprt && 1260 !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) 1261 goto err_bad_vers; 1262 1263 if (rqstp->rq_proc >= versp->vs_nproc) 1264 goto err_bad_proc; 1265 rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc]; 1266 if (!procp) 1267 goto err_bad_proc; 1268 1269 /* Initialize storage for argp and resp */ 1270 memset(rqstp->rq_argp, 0, procp->pc_argzero); 1271 memset(rqstp->rq_resp, 0, procp->pc_ressize); 1272 1273 /* Bump per-procedure stats counter */ 1274 this_cpu_inc(versp->vs_count[rqstp->rq_proc]); 1275 1276 ret->dispatch = versp->vs_dispatch; 1277 return rpc_success; 1278 err_bad_vers: 1279 ret->mismatch.lovers = progp->pg_lovers; 1280 ret->mismatch.hivers = progp->pg_hivers; 1281 return rpc_prog_mismatch; 1282 err_bad_proc: 1283 return rpc_proc_unavail; 1284 } 1285 EXPORT_SYMBOL_GPL(svc_generic_init_request); 1286 1287 /* 1288 * Common routine for processing the RPC request. 1289 */ 1290 static int 1291 svc_process_common(struct svc_rqst *rqstp) 1292 { 1293 struct xdr_stream *xdr = &rqstp->rq_res_stream; 1294 struct svc_program *progp; 1295 const struct svc_procedure *procp = NULL; 1296 struct svc_serv *serv = rqstp->rq_server; 1297 struct svc_process_info process; 1298 enum svc_auth_status auth_res; 1299 unsigned int aoffset; 1300 int rc; 1301 __be32 *p; 1302 1303 /* Will be turned off only when NFSv4 Sessions are used */ 1304 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); 1305 clear_bit(RQ_DROPME, &rqstp->rq_flags); 1306 1307 /* Construct the first words of the reply: */ 1308 svcxdr_init_encode(rqstp); 1309 xdr_stream_encode_be32(xdr, rqstp->rq_xid); 1310 xdr_stream_encode_be32(xdr, rpc_reply); 1311 1312 p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4); 1313 if (unlikely(!p)) 1314 goto err_short_len; 1315 if (*p++ != cpu_to_be32(RPC_VERSION)) 1316 goto err_bad_rpc; 1317 1318 xdr_stream_encode_be32(xdr, rpc_msg_accepted); 1319 1320 rqstp->rq_prog = be32_to_cpup(p++); 1321 rqstp->rq_vers = be32_to_cpup(p++); 1322 rqstp->rq_proc = be32_to_cpup(p); 1323 1324 for (progp = serv->sv_program; progp; progp = progp->pg_next) 1325 if (rqstp->rq_prog == progp->pg_prog) 1326 break; 1327 1328 /* 1329 * Decode auth data, and add verifier to reply buffer. 1330 * We do this before anything else in order to get a decent 1331 * auth verifier. 1332 */ 1333 auth_res = svc_authenticate(rqstp); 1334 /* Also give the program a chance to reject this call: */ 1335 if (auth_res == SVC_OK && progp) 1336 auth_res = progp->pg_authenticate(rqstp); 1337 trace_svc_authenticate(rqstp, auth_res); 1338 switch (auth_res) { 1339 case SVC_OK: 1340 break; 1341 case SVC_GARBAGE: 1342 goto err_garbage_args; 1343 case SVC_SYSERR: 1344 goto err_system_err; 1345 case SVC_DENIED: 1346 goto err_bad_auth; 1347 case SVC_CLOSE: 1348 goto close; 1349 case SVC_DROP: 1350 goto dropit; 1351 case SVC_COMPLETE: 1352 goto sendit; 1353 default: 1354 pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res); 1355 goto err_system_err; 1356 } 1357 1358 if (progp == NULL) 1359 goto err_bad_prog; 1360 1361 switch (progp->pg_init_request(rqstp, progp, &process)) { 1362 case rpc_success: 1363 break; 1364 case rpc_prog_unavail: 1365 goto err_bad_prog; 1366 case rpc_prog_mismatch: 1367 goto err_bad_vers; 1368 case rpc_proc_unavail: 1369 goto err_bad_proc; 1370 } 1371 1372 procp = rqstp->rq_procinfo; 1373 /* Should this check go into the dispatcher? 
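 * For now it stays here so that every program gets the same
 * defensive NULL checks before its dispatch callback runs.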
*/ 1374 if (!procp || !procp->pc_func) 1375 goto err_bad_proc; 1376 1377 /* Syntactic check complete */ 1378 serv->sv_stats->rpccnt++; 1379 trace_svc_process(rqstp, progp->pg_name); 1380 1381 aoffset = xdr_stream_pos(xdr); 1382 1383 /* un-reserve some of the out-queue now that we have a 1384 * better idea of reply size 1385 */ 1386 if (procp->pc_xdrressize) 1387 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); 1388 1389 /* Call the function that processes the request. */ 1390 rc = process.dispatch(rqstp); 1391 if (procp->pc_release) 1392 procp->pc_release(rqstp); 1393 xdr_finish_decode(xdr); 1394 1395 if (!rc) 1396 goto dropit; 1397 if (rqstp->rq_auth_stat != rpc_auth_ok) 1398 goto err_bad_auth; 1399 1400 if (*rqstp->rq_accept_statp != rpc_success) 1401 xdr_truncate_encode(xdr, aoffset); 1402 1403 if (procp->pc_encode == NULL) 1404 goto dropit; 1405 1406 sendit: 1407 if (svc_authorise(rqstp)) 1408 goto close_xprt; 1409 return 1; /* Caller can now send it */ 1410 1411 dropit: 1412 svc_authorise(rqstp); /* doesn't hurt to call this twice */ 1413 dprintk("svc: svc_process dropit\n"); 1414 return 0; 1415 1416 close: 1417 svc_authorise(rqstp); 1418 close_xprt: 1419 if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) 1420 svc_xprt_close(rqstp->rq_xprt); 1421 dprintk("svc: svc_process close\n"); 1422 return 0; 1423 1424 err_short_len: 1425 svc_printk(rqstp, "short len %u, dropping request\n", 1426 rqstp->rq_arg.len); 1427 goto close_xprt; 1428 1429 err_bad_rpc: 1430 serv->sv_stats->rpcbadfmt++; 1431 xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); 1432 xdr_stream_encode_u32(xdr, RPC_MISMATCH); 1433 /* Only RPCv2 supported */ 1434 xdr_stream_encode_u32(xdr, RPC_VERSION); 1435 xdr_stream_encode_u32(xdr, RPC_VERSION); 1436 return 1; /* don't wrap */ 1437 1438 err_bad_auth: 1439 dprintk("svc: authentication failed (%d)\n", 1440 be32_to_cpu(rqstp->rq_auth_stat)); 1441 serv->sv_stats->rpcbadauth++; 1442 /* Restore write pointer to location of reply status: */ 1443 xdr_truncate_encode(xdr, XDR_UNIT * 2); 1444 xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); 1445 xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR); 1446 xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat); 1447 goto sendit; 1448 1449 err_bad_prog: 1450 dprintk("svc: unknown program %d\n", rqstp->rq_prog); 1451 serv->sv_stats->rpcbadfmt++; 1452 *rqstp->rq_accept_statp = rpc_prog_unavail; 1453 goto sendit; 1454 1455 err_bad_vers: 1456 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", 1457 rqstp->rq_vers, rqstp->rq_prog, progp->pg_name); 1458 1459 serv->sv_stats->rpcbadfmt++; 1460 *rqstp->rq_accept_statp = rpc_prog_mismatch; 1461 1462 /* 1463 * svc_authenticate() has already added the verifier and 1464 * advanced the stream just past rq_accept_statp. 
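 * Only the mismatch_info body remains to be encoded: the lowest
 * and highest program versions this server supports.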
1465 */ 1466 xdr_stream_encode_u32(xdr, process.mismatch.lovers); 1467 xdr_stream_encode_u32(xdr, process.mismatch.hivers); 1468 goto sendit; 1469 1470 err_bad_proc: 1471 svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc); 1472 1473 serv->sv_stats->rpcbadfmt++; 1474 *rqstp->rq_accept_statp = rpc_proc_unavail; 1475 goto sendit; 1476 1477 err_garbage_args: 1478 svc_printk(rqstp, "failed to decode RPC header\n"); 1479 1480 serv->sv_stats->rpcbadfmt++; 1481 *rqstp->rq_accept_statp = rpc_garbage_args; 1482 goto sendit; 1483 1484 err_system_err: 1485 serv->sv_stats->rpcbadfmt++; 1486 *rqstp->rq_accept_statp = rpc_system_err; 1487 goto sendit; 1488 } 1489 1490 /** 1491 * svc_process - Execute one RPC transaction 1492 * @rqstp: RPC transaction context 1493 * 1494 */ 1495 void svc_process(struct svc_rqst *rqstp) 1496 { 1497 struct kvec *resv = &rqstp->rq_res.head[0]; 1498 __be32 *p; 1499 1500 #if IS_ENABLED(CONFIG_FAIL_SUNRPC) 1501 if (!fail_sunrpc.ignore_server_disconnect && 1502 should_fail(&fail_sunrpc.attr, 1)) 1503 svc_xprt_deferred_close(rqstp->rq_xprt); 1504 #endif 1505 1506 /* 1507 * Setup response xdr_buf. 1508 * Initially it has just one page 1509 */ 1510 rqstp->rq_next_page = &rqstp->rq_respages[1]; 1511 resv->iov_base = page_address(rqstp->rq_respages[0]); 1512 resv->iov_len = 0; 1513 rqstp->rq_res.pages = rqstp->rq_next_page; 1514 rqstp->rq_res.len = 0; 1515 rqstp->rq_res.page_base = 0; 1516 rqstp->rq_res.page_len = 0; 1517 rqstp->rq_res.buflen = PAGE_SIZE; 1518 rqstp->rq_res.tail[0].iov_base = NULL; 1519 rqstp->rq_res.tail[0].iov_len = 0; 1520 1521 svcxdr_init_decode(rqstp); 1522 p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2); 1523 if (unlikely(!p)) 1524 goto out_drop; 1525 rqstp->rq_xid = *p++; 1526 if (unlikely(*p != rpc_call)) 1527 goto out_baddir; 1528 1529 if (!svc_process_common(rqstp)) 1530 goto out_drop; 1531 svc_send(rqstp); 1532 return; 1533 1534 out_baddir: 1535 svc_printk(rqstp, "bad direction 0x%08x, dropping request\n", 1536 be32_to_cpu(*p)); 1537 rqstp->rq_server->sv_stats->rpcbadfmt++; 1538 out_drop: 1539 svc_drop(rqstp); 1540 } 1541 1542 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1543 /** 1544 * svc_process_bc - process a reverse-direction RPC request 1545 * @req: RPC request to be used for client-side processing 1546 * @rqstp: server-side execution context 1547 * 1548 */ 1549 void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp) 1550 { 1551 struct rpc_task *task; 1552 int proc_error; 1553 struct rpc_timeout timeout; 1554 1555 /* Build the svc_rqst used by the common processing routine */ 1556 rqstp->rq_xid = req->rq_xid; 1557 rqstp->rq_prot = req->rq_xprt->prot; 1558 rqstp->rq_bc_net = req->rq_xprt->xprt_net; 1559 1560 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); 1561 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); 1562 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); 1563 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); 1564 1565 /* Adjust the argument buffer length */ 1566 rqstp->rq_arg.len = req->rq_private_buf.len; 1567 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { 1568 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; 1569 rqstp->rq_arg.page_len = 0; 1570 } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + 1571 rqstp->rq_arg.page_len) 1572 rqstp->rq_arg.page_len = rqstp->rq_arg.len - 1573 rqstp->rq_arg.head[0].iov_len; 1574 else 1575 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + 1576 rqstp->rq_arg.page_len; 1577 1578 /* Reset the response buffer */ 
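/* rq_res is a byte-for-byte copy of req->rq_snd_buf (see the memcpy
 * above), so only the head length needs to be cleared before the
 * encode stream is set up. */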
1579 rqstp->rq_res.head[0].iov_len = 0; 1580 1581 /* 1582 * Skip the XID and calldir fields because they've already 1583 * been processed by the caller. 1584 */ 1585 svcxdr_init_decode(rqstp); 1586 if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) 1587 return; 1588 1589 /* Parse and execute the bc call */ 1590 proc_error = svc_process_common(rqstp); 1591 1592 atomic_dec(&req->rq_xprt->bc_slot_count); 1593 if (!proc_error) { 1594 /* Processing error: drop the request */ 1595 xprt_free_bc_request(req); 1596 return; 1597 } 1598 /* Finally, send the reply synchronously */ 1599 if (rqstp->bc_to_initval > 0) { 1600 timeout.to_initval = rqstp->bc_to_initval; 1601 timeout.to_retries = rqstp->bc_to_retries; 1602 } else { 1603 timeout.to_initval = req->rq_xprt->timeout->to_initval; 1604 timeout.to_retries = req->rq_xprt->timeout->to_retries; 1605 } 1606 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); 1607 task = rpc_run_bc_task(req, &timeout); 1608 1609 if (IS_ERR(task)) 1610 return; 1611 1612 WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); 1613 rpc_put_task(task); 1614 } 1615 EXPORT_SYMBOL_GPL(svc_process_bc); 1616 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1617 1618 /** 1619 * svc_max_payload - Return transport-specific limit on the RPC payload 1620 * @rqstp: RPC transaction context 1621 * 1622 * Returns the maximum number of payload bytes the current transport 1623 * allows. 1624 */ 1625 u32 svc_max_payload(const struct svc_rqst *rqstp) 1626 { 1627 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload; 1628 1629 if (rqstp->rq_server->sv_max_payload < max) 1630 max = rqstp->rq_server->sv_max_payload; 1631 return max; 1632 } 1633 EXPORT_SYMBOL_GPL(svc_max_payload); 1634 1635 /** 1636 * svc_proc_name - Return RPC procedure name in string form 1637 * @rqstp: svc_rqst to operate on 1638 * 1639 * Return value: 1640 * Pointer to a NUL-terminated string 1641 */ 1642 const char *svc_proc_name(const struct svc_rqst *rqstp) 1643 { 1644 if (rqstp && rqstp->rq_procinfo) 1645 return rqstp->rq_procinfo->pc_name; 1646 return "unknown"; 1647 } 1648 1649 1650 /** 1651 * svc_encode_result_payload - mark a range of bytes as a result payload 1652 * @rqstp: svc_rqst to operate on 1653 * @offset: payload's byte offset in rqstp->rq_res 1654 * @length: size of payload, in bytes 1655 * 1656 * Returns zero on success, or a negative errno if a permanent 1657 * error occurred. 1658 */ 1659 int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, 1660 unsigned int length) 1661 { 1662 return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset, 1663 length); 1664 } 1665 EXPORT_SYMBOL_GPL(svc_encode_result_payload); 1666 1667 /** 1668 * svc_fill_write_vector - Construct data argument for VFS write call 1669 * @rqstp: svc_rqst to operate on 1670 * @payload: xdr_buf containing only the write data payload 1671 * 1672 * Fills in rqstp::rq_vec, and returns the number of elements. 1673 */ 1674 unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, 1675 struct xdr_buf *payload) 1676 { 1677 struct page **pages = payload->pages; 1678 struct kvec *first = payload->head; 1679 struct kvec *vec = rqstp->rq_vec; 1680 size_t total = payload->len; 1681 unsigned int i; 1682 1683 /* Some types of transport can present the write payload 1684 * entirely in rq_arg.pages. In this case, @first is empty.
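 * (An RDMA transport, for instance, can deliver the payload
 * entirely page-aligned, leaving the head kvec empty.)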
1685 */ 1686 i = 0; 1687 if (first->iov_len) { 1688 vec[i].iov_base = first->iov_base; 1689 vec[i].iov_len = min_t(size_t, total, first->iov_len); 1690 total -= vec[i].iov_len; 1691 ++i; 1692 } 1693 1694 while (total) { 1695 vec[i].iov_base = page_address(*pages); 1696 vec[i].iov_len = min_t(size_t, total, PAGE_SIZE); 1697 total -= vec[i].iov_len; 1698 ++i; 1699 ++pages; 1700 } 1701 1702 WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec)); 1703 return i; 1704 } 1705 EXPORT_SYMBOL_GPL(svc_fill_write_vector); 1706 1707 /** 1708 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call 1709 * @rqstp: svc_rqst to operate on 1710 * @first: buffer containing first section of pathname 1711 * @p: buffer containing remaining section of pathname 1712 * @total: total length of the pathname argument 1713 * 1714 * The VFS symlink API demands a NUL-terminated pathname in mapped memory. 1715 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free 1716 * the returned string. 1717 */ 1718 char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, 1719 void *p, size_t total) 1720 { 1721 size_t len, remaining; 1722 char *result, *dst; 1723 1724 result = kmalloc(total + 1, GFP_KERNEL); 1725 if (!result) 1726 return ERR_PTR(-ESERVERFAULT); 1727 1728 dst = result; 1729 remaining = total; 1730 1731 len = min_t(size_t, total, first->iov_len); 1732 if (len) { 1733 memcpy(dst, first->iov_base, len); 1734 dst += len; 1735 remaining -= len; 1736 } 1737 1738 if (remaining) { 1739 len = min_t(size_t, remaining, PAGE_SIZE); 1740 memcpy(dst, p, len); 1741 dst += len; 1742 } 1743 1744 *dst = '\0'; 1745 1746 /* Sanity check: Linux doesn't allow the pathname argument to 1747 * contain a NUL byte. 1748 */ 1749 if (strlen(result) != total) { 1750 kfree(result); 1751 return ERR_PTR(-EINVAL); 1752 } 1753 return result; 1754 } 1755 EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname); 1756