// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"
#include "sunrpc.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
__param_set_pool_mode(const char *val, struct svc_pool_map *m)
{
	int err, mode;

	mutex_lock(&svc_pool_map_mutex);

	err = 0;
	if (!strncmp(val, "auto", 4))
		mode = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		mode = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		mode = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		mode = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

	if (err)
		goto out;

	if (m->count == 0)
		m->mode = mode;
	else if (mode != m->mode)
		err = -EBUSY;
out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	struct svc_pool_map *m = kp->arg;

	return __param_set_pool_mode(val, m);
}

int sunrpc_set_pool_mode(const char *val)
{
	return __param_set_pool_mode(val, &svc_pool_map);
}
EXPORT_SYMBOL(sunrpc_set_pool_mode);

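/*
 * Illustrative sketch (not part of the original source): because
 * pool_mode is wired up through module_param_call() below with mode
 * 0644, an administrator can select a mapping mode before any pooled
 * service starts, e.g. from a hypothetical setup script:
 *
 *	# at module load time
 *	modprobe sunrpc pool_mode=pernode
 *
 *	# or later, while no pooled service is running
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once a pooled service holds a reference (m->count != 0), a write
 * that would change the mode fails with -EBUSY, as enforced in
 * __param_set_pool_mode() above.
 */
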
/**
 * sunrpc_get_pool_mode - get the current pool_mode for the host
 * @buf: where to write the current pool_mode
 * @size: size of @buf
 *
 * Grab the current pool_mode from the svc_pool_map and write
 * the resulting string to @buf. Returns the number of characters
 * written to @buf (as snprintf() does).
 */
int
sunrpc_get_pool_mode(char *buf, size_t size)
{
	struct svc_pool_map *m = &svc_pool_map;

	switch (m->mode) {
	case SVC_POOL_AUTO:
		return snprintf(buf, size, "auto");
	case SVC_POOL_GLOBAL:
		return snprintf(buf, size, "global");
	case SVC_POOL_PERCPU:
		return snprintf(buf, size, "percpu");
	case SVC_POOL_PERNODE:
		return snprintf(buf, size, "pernode");
	default:
		return snprintf(buf, size, "%d", m->mode);
	}
}
EXPORT_SYMBOL(sunrpc_get_pool_mode);

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	char str[16];
	int len;

	len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str));

	/* Ensure we have room for newline and NUL */
	len = min_t(int, len, ARRAY_SIZE(str) - 2);

	/* tack on the newline */
	str[len] = '\n';
	str[len + 1] = '\0';

	return sysfs_emit(buf, "%s", str);
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons. In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);
	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools <= 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;
	mutex_unlock(&svc_pool_map_mutex);
	return npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool mode.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);
	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}
	mutex_unlock(&svc_pool_map_mutex);
}

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}

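/*
 * Illustrative sketch (not part of the original source): on a
 * hypothetical 2-node machine with cpus 0-3 on node 0 and cpus 4-7 on
 * node 1, SVC_POOL_PERNODE initialisation above yields:
 *
 *	m->to_pool[0] == 0, m->to_pool[1] == 1   (node -> pool id)
 *	m->pool_to[0] == 0, m->pool_to[1] == 1   (pool id -> node)
 *
 * whereas SVC_POOL_PERCPU on the same machine yields eight pools with
 * m->to_pool[cpu] == cpu for each online cpu. svc_pool_map_set_cpumask()
 * then pins pool 1's threads with cpumask_of_node(1) in the pernode
 * case, or with cpumask_of(cpu) in the percpu case.
 */
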
/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
	struct svc_pool_map *m = &svc_pool_map;
	int cpu = raw_smp_processor_id();
	unsigned int pidx = 0;

	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

static int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

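/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * service would typically combine the helpers in this file roughly as
 * follows, with error handling elided:
 *
 *	serv = svc_create_pooled(&example_program, &example_stats,
 *				 bufsize, example_threadfn);
 *	svc_bind(serv, net);			// register with rpcbind
 *	// ...create listener transports...
 *	svc_set_num_threads(serv, NULL, 8);	// spawn worker threads
 *	// ...
 *	svc_set_num_threads(serv, NULL, 0);	// reap all threads
 *	svc_destroy(&serv);
 *
 * example_program, example_stats and example_threadfn are placeholders,
 * not symbols defined in this file.
 */
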
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	lwq_init(&serv->sv_cb_list);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, struct svc_stat *stats,
	     unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	serv->sv_stats = stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_threadfn = threadfn;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		lwq_init(&pool->sp_xprts);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		init_llist_head(&pool->sp_idle_threads);

		percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
	}

	return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
			    int (*threadfn)(void *data))
{
	return __svc_create(prog, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: the RPC program the new service will handle
 * @stats: the stats struct if desired
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
				   struct svc_stat *stats,
				   unsigned int bufsize,
				   int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, stats, bufsize, npools, threadfn);
	if (!serv)
		goto out_err;
	serv->sv_is_pooled = true;
	return serv;

out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

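/*
 * Illustrative sketch (not part of the original source): the @threadfn
 * passed to svc_create{,_pooled}() is run by each kthread started from
 * svc_start_kthreads() below. A minimal thread function, modelled on
 * what real services do and assuming svc_thread_init_status(),
 * svc_thread_should_stop() and svc_recv() from elsewhere in sunrpc,
 * might look like:
 *
 *	static int example_threadfn(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;	// passed by svc_start_kthreads()
 *
 *		svc_thread_init_status(rqstp, 0); // report startup success
 *		while (!svc_thread_should_stop(rqstp))
 *			svc_recv(rqstp);	// wait for and handle one request
 *		svc_exit_thread(rqstp);		// see svc_exit_thread() below
 *		return 0;
 *	}
 */
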
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv **servp)
{
	struct svc_serv *serv = *servp;
	unsigned int i;

	*servp = NULL;

	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);

	/*
	 * Remaining transports at this point are not expected.
	 */
	WARN_ONCE(!list_empty(&serv->sv_permsocks),
		  "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
	WARN_ONCE(!list_empty(&serv->sv_tempsocks),
		  "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);

	cache_clean_deferred(serv);

	if (serv->sv_is_pooled)
		svc_pool_map_put();

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		percpu_counter_destroy(&pool->sp_messages_arrived);
		percpu_counter_destroy(&pool->sp_sockets_queued);
		percpu_counter_destroy(&pool->sp_threads_woken);
	}
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

static bool
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned long pages, ret;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return true;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume one of them is
				       * at most one page
				       */
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;

	ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
					  rqstp->rq_pages);
	return ret == pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

static void
svc_rqst_free(struct svc_rqst *rqstp)
{
	folio_batch_release(&rqstp->rq_fbatch);
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	folio_batch_init(&rqstp->rq_fbatch);

	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	rqstp->rq_err = -EAGAIN; /* No error yet */

	serv->sv_nrthreads += 1;
	pool->sp_nrthreads += 1;

	/* Protected by whatever lock the service uses when calling
	 * svc_set_num_threads()
	 */
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);

	return rqstp;

out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}

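/*
 * Worked example (not part of the original source): with 4 KiB pages
 * and a hypothetical bufsize of 1 MiB, __svc_create() above sets
 * sv_max_payload = 1048576 and
 * sv_max_mesg = roundup(1048576 + 4096, 4096) = 1052672.
 * svc_init_buffer() then wants 1052672 / 4096 + 1 = 258 pages per
 * thread, clamped to RPCSVC_MAXPAGES if that is smaller.
 */
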
/**
 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
 * @pool: service thread pool
 *
 * Can be called from soft IRQ or process context. Finding an idle
 * service thread and marking it BUSY is atomic with respect to
 * other calls to svc_pool_wake_idle_thread().
 */
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
	struct svc_rqst *rqstp;
	struct llist_node *ln;

	rcu_read_lock();
	ln = READ_ONCE(pool->sp_idle_threads.first);
	if (ln) {
		rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
		if (!task_is_running(rqstp->rq_task)) {
			wake_up_process(rqstp->rq_task);
			trace_svc_wake_up(rqstp->rq_task->pid);
			percpu_counter_inc(&pool->sp_threads_woken);
		}
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);

static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

static struct svc_pool *
svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
		unsigned int *state)
{
	struct svc_pool *pool;
	unsigned int i;

	pool = target_pool;

	if (!pool) {
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			if (pool->sp_nrthreads)
				break;
		}
	}

	if (pool && pool->sp_nrthreads) {
		set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
		set_bit(SP_NEED_VICTIM, &pool->sp_flags);
		return pool;
	}
	return NULL;
}

static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads - 1;
	int node;
	int err;

	do {
		nrservs--;
		chosen_pool = svc_pool_next(serv, pool, &state);
		node = svc_pool_map_get_node(chosen_pool->sp_id);

		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (!rqstp)
			return -ENOMEM;
		task = kthread_create_on_node(serv->sv_threadfn, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);

		wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
		err = rqstp->rq_err;
		if (err) {
			svc_exit_thread(rqstp);
			return err;
		}
	} while (nrservs > 0);

	return 0;
}

static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	unsigned int state = serv->sv_nrthreads - 1;
	struct svc_pool *victim;

	do {
		victim = svc_pool_victim(serv, pool, &state);
		if (!victim)
			break;
		svc_pool_wake_idle_thread(victim);
		wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
			    TASK_IDLE);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

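/*
 * Note on the shutdown handshake above (commentary added for clarity):
 * svc_pool_victim() marks a pool with SP_NEED_VICTIM and
 * SP_VICTIM_REMAINS, then svc_stop_kthreads() wakes an idle thread and
 * sleeps in wait_on_bit() until SP_VICTIM_REMAINS is cleared. The
 * victim thread notices SP_NEED_VICTIM (via svc_thread_should_stop()),
 * leaves its main loop and calls svc_exit_thread(), whose
 * clear_and_wake_up_bit() completes the handshake. One thread is
 * reaped per iteration until nrservs reaches zero.
 */
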
/**
 * svc_set_num_threads - adjust number of threads per RPC service
 * @serv: RPC service to adjust
 * @pool: Specific pool from which to choose threads, or NULL
 * @nrservs: New number of threads for @serv (0 or less means kill all threads)
 *
 * Create or destroy threads to make the number of threads for @serv the
 * given number. If @pool is non-NULL, change only threads in that pool;
 * otherwise, round-robin between all pools for @serv. @serv's
 * sv_nrthreads is adjusted for each thread created or destroyed.
 *
 * Caller must ensure mutual exclusion between this and server startup or
 * shutdown.
 *
 * Returns zero on success or a negative errno if an error occurred while
 * starting a thread.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (!pool)
		nrservs -= serv->sv_nrthreads;
	else
		nrservs -= pool->sp_nrthreads;

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 *
 * Return values:
 *   %true: page replaced
 *   %false: array bounds checking failed
 */
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	struct page **begin = rqstp->rq_pages;
	struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];

	if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
		trace_svc_replace_page_err(rqstp);
		return false;
	}

	if (*rqstp->rq_next_page) {
		if (!folio_batch_add(&rqstp->rq_fbatch,
				     page_folio(*rqstp->rq_next_page)))
			__folio_batch_release(&rqstp->rq_fbatch);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
	return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * @rqstp: RPC transaction context
 *
 * Release response pages that might still be in flight after
 * svc_send, and any spliced filesystem-owned pages.
 */
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
	int i, count = rqstp->rq_next_page - rqstp->rq_respages;

	if (count) {
		release_pages(rqstp->rq_respages, count);
		for (i = 0; i < count; i++)
			rqstp->rq_respages[i] = NULL;
	}
}

/**
 * svc_exit_thread - finalise the termination of a sunrpc server thread
 * @rqstp: the svc_rqst which represents the thread.
 *
 * When a thread started with svc_new_thread() exits it must call
 * svc_exit_thread() as its last act. This must be done with the
 * service mutex held. Normally this is held by a DIFFERENT thread, the
 * one that is calling svc_set_num_threads() and which will wait for
 * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the
 * thread exits for any reason other than svc_thread_should_stop()
 * returning %true (which indicates that svc_set_num_threads() is
 * waiting for it to exit), then it must take the service mutex itself,
 * which can only safely be done using mutex_trylock().
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	list_del_rcu(&rqstp->rq_all);

	pool->sp_nrthreads -= 1;
	serv->sv_nrthreads -= 1;
	svc_sock_update_bufs(serv);

	svc_rqst_free(rqstp);

	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

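/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller honouring the locking rule documented on svc_set_num_threads()
 * might grow and later drain its service like this:
 *
 *	mutex_lock(&example_serv_mutex);	// hypothetical per-service lock
 *	error = svc_set_num_threads(serv, NULL, 16);	// grow to 16 threads
 *	mutex_unlock(&example_serv_mutex);
 *	// ...
 *	mutex_lock(&example_serv_mutex);
 *	svc_set_num_threads(serv, NULL, 0);	// reap every thread
 *	mutex_unlock(&example_serv_mutex);
 */
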
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
		.sin6_addr	= IN6ADDR_ANY_INIT,
		.sin6_port	= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
					     protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
					     protocol, port);
		break;
#endif
	}

	trace_svc_register(progname, version, family, protocol, port, error);
	return error;
}

static int svc_rpcbind_set_version(struct net *net,
				   const struct svc_program *progp,
				   u32 version, int family,
				   unsigned short proto,
				   unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
			      version, family, proto, port);
}

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			error = progp->pg_rpcbind_set(net, progp, i,
						      family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

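/*
 * Illustrative sketch (not part of the original source): a service with
 * an NFS-like program number registering its TCP listener would end up
 * making a call like the following through svc_register() above:
 *
 *	error = svc_register(serv, net, PF_INET, IPPROTO_TCP, 2049);
 *
 * For each non-hidden version this issues an rpcbind v4 SET for netid
 * "tcp" (see __svc_rpcb_register4() above), falling back to a portmap
 * v2 SET when user space only runs portmap. The program number and
 * port here are illustrative, not taken from this file.
 */
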
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version]. If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct sighand_struct *sighand;
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	rcu_read_lock();
	sighand = rcu_dereference(current->sighand);
	spin_lock_irqsave(&sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&sighand->siglock, flags);
	rcu_read_unlock();
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
}
#endif

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
			 const struct svc_program *progp,
			 struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control. (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argzero);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct svc_program *progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	enum svc_auth_status auth_res;
	unsigned int aoffset;
	int rc;
	__be32 *p;

	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	/* Construct the first words of the reply: */
	svcxdr_init_encode(rqstp);
	xdr_stream_encode_be32(xdr, rqstp->rq_xid);
	xdr_stream_encode_be32(xdr, rpc_reply);

	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
	if (unlikely(!p))
		goto err_short_len;
	if (*p++ != cpu_to_be32(RPC_VERSION))
		goto err_bad_rpc;

	xdr_stream_encode_be32(xdr, rpc_msg_accepted);

	rqstp->rq_prog = be32_to_cpup(p++);
	rqstp->rq_vers = be32_to_cpup(p++);
	rqstp->rq_proc = be32_to_cpup(p);

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (rqstp->rq_prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage_args;
	case SVC_SYSERR:
		goto err_system_err;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	default:
		pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
		goto err_system_err;
	}

	if (progp == NULL)
		goto err_bad_prog;

	switch (progp->pg_init_request(rqstp, progp, &process)) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	if (serv->sv_stats)
		serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	aoffset = xdr_stream_pos(xdr);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

	/* Call the function that processes the request. */
	rc = process.dispatch(rqstp);
	if (procp->pc_release)
		procp->pc_release(rqstp);
	xdr_finish_decode(xdr);

	if (!rc)
		goto dropit;
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;

	if (*rqstp->rq_accept_statp != rpc_success)
		xdr_truncate_encode(xdr, aoffset);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_xprt_close(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %u, dropping request\n",
		   rqstp->rq_arg.len);
	goto close_xprt;

err_bad_rpc:
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_MISMATCH);
	/* Only RPCv2 supported */
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	return 1;	/* don't wrap */

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	if (serv->sv_stats)
		serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of reply status: */
	xdr_truncate_encode(xdr, XDR_UNIT * 2);
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
	xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_unavail;
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_mismatch;

	/*
	 * svc_authenticate() has already added the verifier and
	 * advanced the stream just past rq_accept_statp.
	 */
	xdr_stream_encode_u32(xdr, process.mismatch.lovers);
	xdr_stream_encode_u32(xdr, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_proc_unavail;
	goto sendit;

err_garbage_args:
	svc_printk(rqstp, "failed to decode RPC header\n");

	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_garbage_args;
	goto sendit;

err_system_err:
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_system_err;
	goto sendit;
}

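/*
 * Worked example (commentary, not part of the original source): for a
 * successfully authenticated call, the words emitted above form the
 * standard RFC 5531 accepted-reply header that precedes the procedure's
 * own results:
 *
 *	xid                  (echoed from the call)
 *	msg_type     = 1     (rpc_reply)
 *	reply_stat   = 0     (rpc_msg_accepted)
 *	verifier             (flavor + body, added by svc_authenticate())
 *	accept_stat          (*rqstp->rq_accept_statp, e.g. rpc_success)
 *
 * The err_bad_* labels rewrite only the words after the verifier, which
 * is why err_bad_auth truncates the stream to XDR_UNIT * 2 (just past
 * xid and msg_type) before encoding the rejected-reply body.
 */
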
/*
 * Drop request
 */
static void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
}

/**
 * svc_process - Execute one RPC transaction
 * @rqstp: RPC transaction context
 *
 */
void svc_process(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];
	__be32 *p;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
	if (!fail_sunrpc.ignore_server_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_next_page;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	svcxdr_init_decode(rqstp);
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
	if (unlikely(!p))
		goto out_drop;
	rqstp->rq_xid = *p++;
	if (unlikely(*p != rpc_call))
		goto out_baddir;

	if (!svc_process_common(rqstp))
		goto out_drop;
	svc_send(rqstp);
	return;

out_baddir:
	svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
		   be32_to_cpu(*p));
	if (rqstp->rq_server->sv_stats)
		rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
	svc_drop(rqstp);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * svc_process_bc - process a reverse-direction RPC request
 * @req: RPC request to be used for client-side processing
 * @rqstp: server-side execution context
 *
 */
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
	struct rpc_timeout timeout = {
		.to_increment		= 0,
	};
	struct rpc_task *task;
	int proc_error;

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* Reset the response buffer */
	rqstp->rq_res.head[0].iov_len = 0;

	/*
	 * Skip the XID and calldir fields because they've already
	 * been processed by the caller.
	 */
	svcxdr_init_decode(rqstp);
	if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
		return;

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		return;
	}

	/* Finally, send the reply synchronously */
	if (rqstp->bc_to_initval > 0) {
		timeout.to_initval = rqstp->bc_to_initval;
		timeout.to_retries = rqstp->bc_to_retries;
	} else {
		timeout.to_initval = req->rq_xprt->timeout->to_initval;
		timeout.to_retries = req->rq_xprt->timeout->to_retries;
	}
	timeout.to_maxval = timeout.to_initval;
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req, &timeout);

	if (IS_ERR(task))
		return;

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	rpc_put_task(task);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 * @rqstp: RPC transaction context
 *
 * Returns the maximum number of payload bytes the current transport
 * allows.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @payload: xdr_buf containing only the write data payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct xdr_buf *payload)
{
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
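
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * NFS-style write handler could feed the vector built by
 * svc_fill_write_vector() straight into a kvec iterator for the VFS,
 * assuming a struct iov_iter on the caller's stack:
 *
 *	struct iov_iter iter;
 *	unsigned int nvecs;
 *
 *	nvecs = svc_fill_write_vector(rqstp, payload);
 *	iov_iter_kvec(&iter, ITER_SOURCE, rqstp->rq_vec, nvecs,
 *		      payload->len);
 *	// ...pass &iter to vfs_iter_write() or similar...
 */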