// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"
#include "sunrpc.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */

struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
__param_set_pool_mode(const char *val, struct svc_pool_map *m)
{
	int err, mode;

	mutex_lock(&svc_pool_map_mutex);

	err = 0;
	if (!strncmp(val, "auto", 4))
		mode = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		mode = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		mode = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		mode = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

	if (err)
		goto out;

	if (m->count == 0)
		m->mode = mode;
	else if (mode != m->mode)
		err = -EBUSY;
out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	struct svc_pool_map *m = kp->arg;

	return __param_set_pool_mode(val, m);
}

int sunrpc_set_pool_mode(const char *val)
{
	return __param_set_pool_mode(val, &svc_pool_map);
}
EXPORT_SYMBOL(sunrpc_set_pool_mode);

/**
 * sunrpc_get_pool_mode - get the current pool_mode for the host
 * @buf: where to write the current pool_mode
 * @size: size of @buf
 *
 * Grab the current pool_mode from the svc_pool_map and write
 * the resulting string to @buf. Returns the number of characters
 * written to @buf (as snprintf() does).
 */
int
sunrpc_get_pool_mode(char *buf, size_t size)
{
	struct svc_pool_map *m = &svc_pool_map;

	switch (m->mode) {
	case SVC_POOL_AUTO:
		return snprintf(buf, size, "auto");
	case SVC_POOL_GLOBAL:
		return snprintf(buf, size, "global");
	case SVC_POOL_PERCPU:
		return snprintf(buf, size, "percpu");
	case SVC_POOL_PERNODE:
		return snprintf(buf, size, "pernode");
	default:
		return snprintf(buf, size, "%d", m->mode);
	}
}
EXPORT_SYMBOL(sunrpc_get_pool_mode);

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	char str[16];
	int len;

	len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str));

	/* Ensure we have room for newline and NUL */
	len = min_t(int, len, ARRAY_SIZE(str) - 2);

	/* tack on the newline */
	str[len] = '\n';
	str[len + 1] = '\0';

	return sysfs_emit(buf, "%s", str);
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map, 0644);
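
/*
 * The parameter registered above is also visible at
 * /sys/module/sunrpc/parameters/pool_mode; changing the mode while any
 * pooled service is running fails with -EBUSY (see __param_set_pool_mode).
 */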

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons. In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);
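	/* If the map is already initialised, just take another reference */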
	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools <= 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;
	mutex_unlock(&svc_pool_map_mutex);
	return npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);
	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}
	mutex_unlock(&svc_pool_map_mutex);
}

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return numa_mem_id();
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];
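	/* Note: in SVC_POOL_PERCPU mode, "node" actually holds a cpu number */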

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
	struct svc_pool_map *m = &svc_pool_map;
	int cpu = raw_smp_processor_id();
	unsigned int pidx = 0;

	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

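	/* The modulo guards against a pool id that exceeds this serv's own
	 * pool count: the global map may describe more pools than @serv has.
	 */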
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

static int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	unsigned int p, i;

	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	lwq_init(&serv->sv_cb_list);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats,
	     unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_programs = prog;
	serv->sv_nprogs = nprogs;
	serv->sv_stats = stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_threadfn = threadfn;
	xdrsize = 0;
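	/* Find each program's lowest and highest supported version, and
	 * the largest XDR argument/reply size any of its procedures needs.
	 */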
	for (i = 0; i < nprogs; i++) {
		struct svc_program *progp = &prog[i];

		progp->pg_lovers = progp->pg_nvers - 1;
		for (vers = 0; vers < progp->pg_nvers; vers++)
			if (progp->pg_vers[vers]) {
				progp->pg_hivers = vers;
				if (progp->pg_lovers > vers)
					progp->pg_lovers = vers;
				if (progp->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = progp->pg_vers[vers]->vs_xdrsize;
			}
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		lwq_init(&pool->sp_xprts);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		init_llist_head(&pool->sp_idle_threads);

		percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
	}

	return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
			    int (*threadfn)(void *data))
{
	return __svc_create(prog, 1, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: Array of RPC programs the new service will handle
 * @nprogs: Number of programs in the array
 * @stats: the stats struct if desired
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
				   unsigned int nprogs,
				   struct svc_stat *stats,
				   unsigned int bufsize,
				   int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn);
	if (!serv)
		goto out_err;
	serv->sv_is_pooled = true;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
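
/*
 * A minimal usage sketch (hypothetical caller, not from any in-tree user):
 *
 *	serv = svc_create_pooled(my_progs, ARRAY_SIZE(my_progs), &my_stats,
 *				 RPCSVC_MAXPAYLOAD, my_threadfn);
 *	if (!serv)
 *		return -ENOMEM;
 *	err = svc_set_num_threads(serv, NULL, 8);
 *
 * "my_progs", "my_stats" and "my_threadfn" are caller-supplied.
 */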

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv **servp)
{
	struct svc_serv *serv = *servp;
	unsigned int i;

	*servp = NULL;

	dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);

	/*
	 * Remaining transports at this point are not expected.
	 */
	WARN_ONCE(!list_empty(&serv->sv_permsocks),
		  "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name);
	WARN_ONCE(!list_empty(&serv->sv_tempsocks),
		  "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name);

	cache_clean_deferred(serv);

	if (serv->sv_is_pooled)
		svc_pool_map_put();

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		percpu_counter_destroy(&pool->sp_messages_arrived);
		percpu_counter_destroy(&pool->sp_sockets_queued);
		percpu_counter_destroy(&pool->sp_threads_woken);
	}
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

static bool
svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node)
{
	rqstp->rq_maxpages = svc_serv_maxpages(serv);

	/* rq_pages' last entry is NULL for historical reasons. */
	rqstp->rq_pages = kcalloc_node(rqstp->rq_maxpages + 1,
				       sizeof(struct page *),
				       GFP_KERNEL, node);
	if (!rqstp->rq_pages)
		return false;

	return true;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned long i;

	for (i = 0; i < rqstp->rq_maxpages; i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
	kfree(rqstp->rq_pages);
}

static void
svc_rqst_free(struct svc_rqst *rqstp)
{
	folio_batch_release(&rqstp->rq_fbatch);
	kfree(rqstp->rq_bvec);
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_folio)
		folio_put(rqstp->rq_scratch_folio);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	folio_batch_init(&rqstp->rq_fbatch);

	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_folio = __folio_alloc_node(GFP_KERNEL, 0, node);
	if (!rqstp->rq_scratch_folio)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv, node))
		goto out_enomem;

	rqstp->rq_bvec = kcalloc_node(rqstp->rq_maxpages,
				      sizeof(struct bio_vec),
				      GFP_KERNEL, node);
	if (!rqstp->rq_bvec)
		goto out_enomem;

	rqstp->rq_err = -EAGAIN;	/* No error yet */

	serv->sv_nrthreads += 1;
	pool->sp_nrthreads += 1;

	/* Protected by whatever lock the service uses when calling
	 * svc_set_num_threads()
	 */
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);

	return rqstp;

out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}

/**
 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
 * @pool: service thread pool
 *
 * Can be called from soft IRQ or process context. Finding an idle
 * service thread and marking it BUSY is atomic with respect to
 * other calls to svc_pool_wake_idle_thread().
 *
 */
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
	struct svc_rqst *rqstp;
	struct llist_node *ln;

	rcu_read_lock();
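	/* Peek at the first idle thread without removing it from the list;
	 * an idle thread is expected to take itself off the list when it
	 * resumes running.
	 */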
	ln = READ_ONCE(pool->sp_idle_threads.first);
	if (ln) {
		rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
		if (!task_is_running(rqstp->rq_task)) {
			wake_up_process(rqstp->rq_task);
			trace_svc_pool_thread_wake(pool, rqstp->rq_task->pid);
			percpu_counter_inc(&pool->sp_threads_woken);
		} else {
			trace_svc_pool_thread_running(pool, rqstp->rq_task->pid);
		}
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	trace_svc_pool_thread_noidle(pool, 0);
}
EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);

static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

static struct svc_pool *
svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
		unsigned int *state)
{
	struct svc_pool *pool;
	unsigned int i;

	pool = target_pool;

	if (!pool) {
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			if (pool->sp_nrthreads)
				break;
		}
	}

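	/* Mark one thread in this pool for removal; the victim clears
	 * SP_VICTIM_REMAINS in svc_exit_thread() once it is gone.
	 */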
	if (pool && pool->sp_nrthreads) {
		set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
		set_bit(SP_NEED_VICTIM, &pool->sp_flags);
		return pool;
	}
	return NULL;
}

static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads - 1;
	int node;
	int err;

	do {
		nrservs--;
		chosen_pool = svc_pool_next(serv, pool, &state);
		node = svc_pool_map_get_node(chosen_pool->sp_id);

		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (!rqstp)
			return -ENOMEM;
		task = kthread_create_on_node(serv->sv_threadfn, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);

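		/* Wait for the new thread to report its startup status;
		 * rq_err was initialised to the -EAGAIN sentinel in
		 * svc_prepare_thread().
		 */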
		wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
		err = rqstp->rq_err;
		if (err) {
			svc_exit_thread(rqstp);
			return err;
		}
	} while (nrservs > 0);

	return 0;
}

static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	unsigned int state = serv->sv_nrthreads - 1;
	struct svc_pool *victim;

	do {
		victim = svc_pool_victim(serv, pool, &state);
		if (!victim)
			break;
		svc_pool_wake_idle_thread(victim);
		wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
			    TASK_IDLE);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

/**
 * svc_set_num_threads - adjust number of threads per RPC service
 * @serv: RPC service to adjust
 * @pool: Specific pool from which to choose threads, or NULL
 * @nrservs: New number of threads for @serv (0 or less means kill all threads)
 *
 * Create or destroy threads to make the number of threads for @serv the
 * given number. If @pool is non-NULL, change only threads in that pool;
 * otherwise, round-robin between all pools for @serv. @serv's
 * sv_nrthreads is adjusted for each thread created or destroyed.
 *
 * Caller must ensure mutual exclusion between this and server startup or
 * shutdown.
 *
 * Returns zero on success or a negative errno if an error occurred while
 * starting a thread.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (!pool)
		nrservs -= serv->sv_nrthreads;
	else
		nrservs -= pool->sp_nrthreads;

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 *
 * Return values:
 *   %true: page replaced
 *   %false: array bounds checking failed
 */
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	struct page **begin = rqstp->rq_pages;
	struct page **end = &rqstp->rq_pages[rqstp->rq_maxpages];

	if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
		trace_svc_replace_page_err(rqstp);
		return false;
	}

	if (*rqstp->rq_next_page) {
		if (!folio_batch_add(&rqstp->rq_fbatch,
				     page_folio(*rqstp->rq_next_page)))
			__folio_batch_release(&rqstp->rq_fbatch);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
	return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * @rqstp: RPC transaction context
 *
 * Release response pages that might still be in flight after
 * svc_send, and any spliced filesystem-owned pages.
 */
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
	int i, count = rqstp->rq_next_page - rqstp->rq_respages;

	if (count) {
		release_pages(rqstp->rq_respages, count);
		for (i = 0; i < count; i++)
			rqstp->rq_respages[i] = NULL;
	}
}

/**
 * svc_exit_thread - finalise the termination of a sunrpc server thread
 * @rqstp: the svc_rqst which represents the thread.
 *
 * When a thread started by svc_set_num_threads() exits it must call
 * svc_exit_thread() as its last act. This must be done with the
 * service mutex held. Normally this is held by a DIFFERENT thread, the
 * one that is calling svc_set_num_threads() and which will wait for
 * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the
 * thread exits for any reason other than svc_thread_should_stop()
 * returning %true (which indicated that svc_set_num_threads() is
 * waiting for it to exit), then it must take the service mutex itself,
 * which can only safely be done using mutex_trylock().
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	list_del_rcu(&rqstp->rq_all);

	pool->sp_nrthreads -= 1;
	serv->sv_nrthreads -= 1;
	svc_sock_update_bufs(serv);

	svc_rqst_free(rqstp);

	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
					     protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
					     protocol, port);
#endif
	}

	trace_svc_register(progname, version, family, protocol, port, error);
	return error;
}

static
int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
			      version, family, proto, port);
}

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	unsigned int p, i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
						      family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version]. If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	unsigned int p, i;

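	/* Temporarily clear pending-signal state so that the rpcbind
	 * upcalls below are not interrupted; it is recomputed below.
	 */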
	clear_thread_flag(TIF_SIGPENDING);

	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	rcu_read_lock();
	sighand = rcu_dereference(current->sighand);
	spin_lock_irqsave(&sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&sighand->siglock, flags);
	rcu_read_unlock();
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
			 const struct svc_program *progp,
			 struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control. (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argzero);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct svc_program *progp = NULL;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	enum svc_auth_status auth_res;
	unsigned int aoffset;
	int pr, rc;
	__be32 *p;

	/* Reset the accept_stat for the RPC */
	rqstp->rq_accept_statp = NULL;

	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	/* Construct the first words of the reply: */
	svcxdr_init_encode(rqstp);
	xdr_stream_encode_be32(xdr, rqstp->rq_xid);
	xdr_stream_encode_be32(xdr, rpc_reply);

	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
	if (unlikely(!p))
		goto err_short_len;
	if (*p++ != cpu_to_be32(RPC_VERSION))
		goto err_bad_rpc;

	xdr_stream_encode_be32(xdr, rpc_msg_accepted);

	rqstp->rq_prog = be32_to_cpup(p++);
	rqstp->rq_vers = be32_to_cpup(p++);
	rqstp->rq_proc = be32_to_cpup(p);

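	/* Find the registered program matching the requested program number */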
	for (pr = 0; pr < serv->sv_nprogs; pr++)
		if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog)
			progp = &serv->sv_programs[pr];

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		goto err_bad_auth;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	default:
		pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
		rqstp->rq_auth_stat = rpc_autherr_failed;
		goto err_bad_auth;
	}

	if (progp == NULL)
		goto err_bad_prog;

	switch (progp->pg_init_request(rqstp, progp, &process)) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	if (serv->sv_stats)
		serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	aoffset = xdr_stream_pos(xdr);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

	/* Call the function that processes the request. */
	rc = process.dispatch(rqstp);
	if (procp->pc_release)
		procp->pc_release(rqstp);
	xdr_finish_decode(xdr);

	if (!rc)
		goto dropit;
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;

	if (*rqstp->rq_accept_statp != rpc_success)
		xdr_truncate_encode(xdr, aoffset);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_xprt_close(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %u, dropping request\n",
		   rqstp->rq_arg.len);
	goto close_xprt;

err_bad_rpc:
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_MISMATCH);
	/* Only RPCv2 supported */
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	return 1;	/* don't wrap */

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	if (serv->sv_stats)
		serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of reply status: */
	xdr_truncate_encode(xdr, XDR_UNIT * 2);
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
	xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_unavail;
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_mismatch;

	/*
	 * svc_authenticate() has already added the verifier and
	 * advanced the stream just past rq_accept_statp.
	 */
	xdr_stream_encode_u32(xdr, process.mismatch.lovers);
	xdr_stream_encode_u32(xdr, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_proc_unavail;
	goto sendit;
}

/*
 * Drop request
 */
static void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
}

/**
 * svc_process - Execute one RPC transaction
 * @rqstp: RPC transaction context
 *
 */
void svc_process(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];
	__be32 *p;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
	if (!fail_sunrpc.ignore_server_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_next_page;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	svcxdr_init_decode(rqstp);
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
	if (unlikely(!p))
		goto out_drop;
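	/* The first two words of every RPC message are the XID and the
	 * message direction, which must be CALL here.
	 */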
	rqstp->rq_xid = *p++;
	if (unlikely(*p != rpc_call))
		goto out_baddir;

	if (!svc_process_common(rqstp))
		goto out_drop;
	svc_send(rqstp);
	return;

out_baddir:
	svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
		   be32_to_cpu(*p));
	if (rqstp->rq_server->sv_stats)
		rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
	svc_drop(rqstp);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * svc_process_bc - process a reverse-direction RPC request
 * @req: RPC request to be used for client-side processing
 * @rqstp: server-side execution context
 *
 */
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
	struct rpc_timeout timeout = {
		.to_increment		= 0,
	};
	struct rpc_task *task;
	int proc_error;

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* Reset the response buffer */
	rqstp->rq_res.head[0].iov_len = 0;

	/*
	 * Skip the XID and calldir fields because they've already
	 * been processed by the caller.
	 */
	svcxdr_init_decode(rqstp);
	if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
		return;

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		return;
	}
	/* Finally, send the reply synchronously */
	if (rqstp->bc_to_initval > 0) {
		timeout.to_initval = rqstp->bc_to_initval;
		timeout.to_retries = rqstp->bc_to_retries;
	} else {
		timeout.to_initval = req->rq_xprt->timeout->to_initval;
		timeout.to_retries = req->rq_xprt->timeout->to_retries;
	}
	timeout.to_maxval = timeout.to_initval;
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req, &timeout);

	if (IS_ERR(task))
		return;

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	rpc_put_task(task);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 * @rqstp: RPC transaction context
 *
 * Returns the maximum number of payload bytes the current transport
 * allows.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);