Lines Matching +full:protocol +full:- +full:node
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 */
/* pool modes */
	SVC_POOL_AUTO = -1,		/* choose one of the others */
	...
	SVC_POOL_PERNODE		/* one pool per numa node */

/* struct svc_pool_map */
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
/* __param_set_pool_mode() */
	err = -EINVAL;
	...
	if (m->count == 0)
		m->mode = mode;
	else if (mode != m->mode)
		err = -EBUSY;
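/*
 * Editor's sketch (not part of svc.c): the guard above implements
 * "first writer wins" -- while the map has users (count > 0) the mode
 * may be re-asserted but not changed. A minimal userspace model with
 * hypothetical names:
 */
#include <errno.h>

struct pool_map_sketch {
	int count;	/* users of the current mapping */
	int mode;	/* active pool mode */
};

static int set_mode_sketch(struct pool_map_sketch *m, int mode)
{
	if (m->count == 0)
		m->mode = mode;		/* unused: free to change */
	else if (mode != m->mode)
		return -EBUSY;		/* in use: only the same mode is OK */
	return 0;
}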
/* param_set_pool_mode() */
	struct svc_pool_map *m = kp->arg;

/**
 * sunrpc_get_pool_mode - get the current pool_mode for the host
 */
	switch (m->mode)
	...
	return snprintf(buf, size, "%d", m->mode);

/* param_get_pool_mode() */
	len = min_t(int, len, ARRAY_SIZE(str) - 2);
/* svc_pool_map_choose_mode() */
	unsigned int node;
	...
	/* ... so split pools on NUMA node boundaries */
	...
	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic ...
		 */
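/*
 * Editor's sketch (not part of svc.c): the auto heuristic prefers
 * per-node pools on multi-node NUMA, per-cpu pools on non-trivial SMP,
 * and a single global pool otherwise. The multi-node test is an
 * assumption here; the ">2 cpus" test is visible above.
 */
enum { POOL_GLOBAL, POOL_PERCPU, POOL_PERNODE };

static int choose_mode_sketch(unsigned int nr_nodes,
			      unsigned int cpus_on_first_node)
{
	if (nr_nodes > 1)
		return POOL_PERNODE;	/* split on NUMA node boundaries */
	if (cpus_on_first_node > 2)
		return POOL_PERCPU;	/* non-trivial SMP */
	return POOL_GLOBAL;		/* small machine: one pool */
}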
/* svc_pool_map_alloc_arrays() */
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		...
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		...
	/* unwind on failure: */
	kfree(m->to_pool);
	m->to_pool = NULL;
	...
	return -ENOMEM;
/* svc_pool_map_init_percpu() */
	m->to_pool[cpu] = pidx;
	m->pool_to[pidx] = cpu;

/* svc_pool_map_init_pernode() */
	unsigned int node;
	...
	for_each_node_with_cpus(node) {
		...
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
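/*
 * Editor's sketch (not part of svc.c): the map is kept in both
 * directions -- to_pool[] answers "which pool serves this cpu/node?",
 * pool_to[] answers "which cpu/node does this pool live on?". A
 * self-contained per-cpu variant:
 */
#include <stdlib.h>

struct map_sketch {
	unsigned int *to_pool;	/* cpu or node -> pool id */
	unsigned int *pool_to;	/* pool id -> cpu or node */
};

static int init_percpu_sketch(struct map_sketch *m, unsigned int ncpus)
{
	unsigned int cpu, pidx = 0;

	m->to_pool = calloc(ncpus, sizeof(unsigned int));
	m->pool_to = calloc(ncpus, sizeof(unsigned int));
	if (!m->to_pool || !m->pool_to) {
		free(m->to_pool);	/* free(NULL) is a no-op */
		free(m->pool_to);
		return -1;
	}
	for (cpu = 0; cpu < ncpus; cpu++, pidx++) {
		m->to_pool[cpu] = pidx;	/* forward lookup */
		m->pool_to[pidx] = cpu;	/* reverse lookup */
	}
	return (int)pidx;		/* number of pools created */
}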
/* svc_pool_map_get() */
	int npools = -1;
	...
	if (m->count++) {
		...
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	...
	/* fall back to a single global pool on failure */
	m->mode = SVC_POOL_GLOBAL;
	...
	m->npools = npools;
/* svc_pool_map_put() */
	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}
/* svc_pool_map_get_node() */
	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
/* svc_pool_map_set_cpumask() */
	unsigned int node = m->pool_to[pidx];
	...
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 */
	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

	return &serv->sv_pools[pidx % serv->sv_nrpools];
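/*
 * Editor's sketch (not part of svc.c): selecting a pool for the
 * current cpu; the final modulo keeps an out-of-range pool index from
 * walking off the service's pool array, as above.
 */
struct pool_sketch { unsigned int id; };

static struct pool_sketch *
pool_for_cpu_sketch(struct pool_sketch *pools, unsigned int npools,
		    const unsigned int *to_pool, unsigned int cpu)
{
	unsigned int pidx;

	if (npools <= 1)
		return pools;		/* single global pool */
	pidx = to_pool[cpu];		/* per-cpu mapping */
	return &pools[pidx % npools];	/* clamp to a valid pool */
}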
/* svc_uses_rpcbind() */
	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return true;
		}
	}

/* __svc_init_bc() */
	lwq_init(&serv->sv_cb_list);
/* __svc_create() */
	serv->sv_name = prog->pg_name;
	serv->sv_programs = prog;
	serv->sv_nprogs = nprogs;
	serv->sv_stats = stats;
	...
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_threadfn = threadfn;
	...
	progp->pg_lovers = progp->pg_nvers - 1;
	for (vers = 0; vers < progp->pg_nvers; vers++)
		if (progp->pg_vers[vers]) {
			progp->pg_hivers = vers;
			if (progp->pg_lovers > vers)
				progp->pg_lovers = vers;
			if (progp->pg_vers[vers]->vs_xdrsize > xdrsize)
				xdrsize = progp->pg_vers[vers]->vs_xdrsize;
		}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);
	...
	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		...
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		lwq_init(&pool->sp_xprts);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		init_llist_head(&pool->sp_idle_threads);
		...
		percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
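/*
 * Editor's sketch (not part of svc.c): the version scan above walks a
 * sparse table (NULL = unimplemented version) to find the lowest and
 * highest supported versions and the largest XDR buffer requirement.
 * A self-contained model with hypothetical types:
 */
struct vers_sketch { unsigned int xdrsize; };

static void scan_versions_sketch(const struct vers_sketch **vers,
				 unsigned int nvers, unsigned int *lo,
				 unsigned int *hi, unsigned int *xdrmax)
{
	unsigned int v;

	*lo = nvers - 1;	/* start high; any hit can only lower it */
	*hi = 0;
	*xdrmax = 0;
	for (v = 0; v < nvers; v++) {
		if (!vers[v])
			continue;	/* version not implemented */
		*hi = v;		/* rises monotonically */
		if (*lo > v)
			*lo = v;
		if (vers[v]->xdrsize > *xdrmax)
			*xdrmax = vers[v]->xdrsize;
	}
}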
/**
 * svc_create - Create an RPC service
 */

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 */
	serv->sv_is_pooled = true;
/* svc_destroy() */
	dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);
	...
	WARN_ONCE(!list_empty(&serv->sv_permsocks),
		  "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name);
	WARN_ONCE(!list_empty(&serv->sv_tempsocks),
		  "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name);
	...
	if (serv->sv_is_pooled)
		...

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		percpu_counter_destroy(&pool->sp_messages_arrived);
		percpu_counter_destroy(&pool->sp_sockets_queued);
		percpu_counter_destroy(&pool->sp_threads_woken);
	}
	kfree(serv->sv_pools);
/* svc_init_buffer() */
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
	...
	ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
					  rqstp->rq_pages);
/* svc_release_buffer() */
	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);

/* svc_rqst_free() */
	folio_batch_release(&rqstp->rq_fbatch);
	...
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
/* svc_prepare_thread() */
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
	...
	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	...
	folio_batch_init(&rqstp->rq_fbatch);

	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	rqstp->rq_err = -EAGAIN;	/* No error yet */

	serv->sv_nrthreads += 1;
	pool->sp_nrthreads += 1;
	...
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
/**
 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
 */
	ln = READ_ONCE(pool->sp_idle_threads.first);
	...
	WRITE_ONCE(rqstp->rq_qtime, ktime_get());
	if (!task_is_running(rqstp->rq_task)) {
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		percpu_counter_inc(&pool->sp_threads_woken);
	}
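/*
 * Editor's sketch (not part of svc.c): peek at the head of an idle
 * list and wake that worker only if it is not already running. Here
 * the list is a plain singly linked stack and "waking" is a callback;
 * the kernel code uses an llist plus wake_up_process().
 */
struct worker_sketch {
	struct worker_sketch *next_idle;
	int running;
};

static int wake_idle_sketch(struct worker_sketch *idle_head,
			    void (*wake)(struct worker_sketch *))
{
	struct worker_sketch *w = idle_head;

	if (!w)
		return 0;	/* nobody idle: caller must queue the work */
	if (!w->running)
		wake(w);	/* only wake a sleeping worker */
	return 1;
}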
/* svc_pool_next() */
	return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];

/* svc_pool_victim() */
	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
		if (pool->sp_nrthreads)
			break;
	}

	if (pool && pool->sp_nrthreads) {
		set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
		set_bit(SP_NEED_VICTIM, &pool->sp_flags);
/* svc_start_kthreads() */
	unsigned int state = serv->sv_nrthreads - 1;
	int node;
	...
	nrservs--;
	...
	node = svc_pool_map_get_node(chosen_pool->sp_id);
	...
	rqstp = svc_prepare_thread(serv, chosen_pool, node);
	if (!rqstp)
		return -ENOMEM;
	task = kthread_create_on_node(serv->sv_threadfn, rqstp,
				      node, "%s", serv->sv_name);
	...
	rqstp->rq_task = task;
	if (serv->sv_nrpools > 1)
		svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
	...
	wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
	err = rqstp->rq_err;
/* svc_stop_kthreads() */
	unsigned int state = serv->sv_nrthreads - 1;
	...
	wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
		    TASK_IDLE);

/**
 * svc_set_num_threads - adjust number of threads per RPC service
 * ...
 * given number. If @pool is non-NULL, change only threads in that pool;
 * otherwise, round-robin between all pools for @serv. @serv's
 * ...
 */
	if (!pool)
		nrservs -= serv->sv_nrthreads;
	else
		nrservs -= pool->sp_nrthreads;
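/*
 * Editor's sketch (not part of svc.c): the subtraction above turns an
 * absolute thread target into a signed delta -- positive means start
 * threads, negative means stop some. A minimal model with hypothetical
 * callbacks:
 */
static void set_num_threads_sketch(int target, int *nrthreads,
				   void (*start_one)(void),
				   void (*stop_one)(void))
{
	int delta = target - *nrthreads;	/* the subtraction above */

	for (; delta > 0; delta--, (*nrthreads)++)
		start_one();
	for (; delta < 0; delta++, (*nrthreads)--)
		stop_one();
}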
/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 */
	struct page **begin = rqstp->rq_pages;
	struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];

	if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
		...
	}

	if (*rqstp->rq_next_page) {
		if (!folio_batch_add(&rqstp->rq_fbatch,
				     page_folio(*rqstp->rq_next_page)))
			__folio_batch_release(&rqstp->rq_fbatch);
	}
	...
	*(rqstp->rq_next_page++) = page;
/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * ...
 * svc_send, and any spliced filesystem-owned pages.
 */
	int i, count = rqstp->rq_next_page - rqstp->rq_respages;

	if (count) {
		release_pages(rqstp->rq_respages, count);
		for (i = 0; i < count; i++)
			rqstp->rq_respages[i] = NULL;
	}
/**
 * svc_exit_thread - finalise the termination of a sunrpc server thread
 */
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	list_del_rcu(&rqstp->rq_all);
	...
	pool->sp_nrthreads -= 1;
	serv->sv_nrthreads -= 1;
	...
	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
/*
 * Register an "inet" protocol family netid with the local
 * ...
 * we map IP_ protocol numbers to netids by hand.
 */
/* __svc_rpcb_register4() */
	switch (protocol) {
	...
	default:
		return -ENOPROTOOPT;
	}
	...
	/*
	 * ... retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);
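/*
 * Editor's sketch (not part of svc.c): mapping an IPPROTO_ number to a
 * netid string by hand, with -ENOPROTOOPT for anything unsupported,
 * mirroring the switch above. Constants come from standard headers;
 * the function name is hypothetical.
 */
#include <errno.h>
#include <netinet/in.h>

static const char *netid_for_proto_sketch(int protocol, int *err)
{
	switch (protocol) {
	case IPPROTO_UDP:
		return "udp";
	case IPPROTO_TCP:
		return "tcp";
	default:
		*err = -ENOPROTOOPT;	/* no netid for this protocol */
		return NULL;
	}
}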
/*
 * Register an "inet6" protocol family netid with the local
 * ...
 * we map IP_ protocol numbers to netids by hand.
 */
/* __svc_rpcb_register6() */
	switch (protocol) {
	...
	default:
		return -ENOPROTOOPT;
	}
	...
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;	/* legacy rpcbind v2 cannot register IPv6 */
/* __svc_register() */
	int error = -EAFNOSUPPORT;
	...
	error = __svc_rpcb_register4(net, program, version,
				     protocol, port);
	...
	error = __svc_rpcb_register6(net, program, version,
				     protocol, port);
	...
	trace_svc_register(progname, version, family, protocol, port, error);

/* svc_rpcbind_set_version() */
	return __svc_register(net, progp->pg_name, progp->pg_prog,
			      version, family, proto, port);
/* svc_generic_rpcbind_set() */
	const struct svc_version *vers = progp->pg_vers[version];
	...
	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     ...);
		return 0;
	}
	...
	/* Don't register a UDP port if the version needs congestion control */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;
	...
	return (vers->vs_rpcb_optnl) ? 0 : error;
/**
 * svc_register - register an RPC service with the local portmapper
 * ...
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * ...
 * Service is registered for any address in the passed-in protocol family
 */
	...
		return -EINVAL;

	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {
			...
			error = progp->pg_rpcbind_set(net, progp, i,
						      family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
/* __svc_unregister() */
	/*
	 * ... retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

/* svc_unregister() */
	for (p = 0; p < serv->sv_nprogs; p++) {
		struct svc_program *progp = &serv->sv_programs[p];

		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}
	...
	sighand = rcu_dereference(current->sighand);
	spin_lock_irqsave(&sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&sighand->siglock, flags);
/* svc_generic_init_request() */
	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	...
	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control ...
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	...
	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argzero);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
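/*
 * Editor's sketch (not part of svc.c): validate version and procedure
 * numbers against sparse tables before touching them, zero the argument
 * and result structs, then hand back the procedure entry -- the same
 * shape as the checks above. All names here are hypothetical.
 */
#include <string.h>

struct proc_sketch { size_t argzero, ressize; int (*func)(void *, void *); };
struct vtable_sketch { unsigned int nproc; const struct proc_sketch *proc; };

static const struct proc_sketch *
init_request_sketch(const struct vtable_sketch **vers, unsigned int nvers,
		    unsigned int v, unsigned int p, void *argp, void *resp)
{
	const struct vtable_sketch *versp;
	const struct proc_sketch *procp;

	if (v >= nvers || !vers[v])
		return NULL;			/* version mismatch */
	versp = vers[v];
	if (p >= versp->nproc)
		return NULL;			/* procedure unavailable */
	procp = &versp->proc[p];
	memset(argp, 0, procp->argzero);	/* decoded args start zeroed */
	memset(resp, 0, procp->ressize);	/* results start zeroed */
	return procp;				/* caller invokes procp->func */
}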
/* svc_process_common() */
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	...
	struct svc_serv *serv = rqstp->rq_server;
	...
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);
	...
	xdr_stream_encode_be32(xdr, rqstp->rq_xid);
	...
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
	...
	rqstp->rq_prog = be32_to_cpup(p++);
	rqstp->rq_vers = be32_to_cpup(p++);
	rqstp->rq_proc = be32_to_cpup(p);

	for (pr = 0; pr < serv->sv_nprogs; pr++)
		if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog)
			progp = &serv->sv_programs[pr];
	...
	auth_res = progp->pg_authenticate(rqstp);
	...
	switch (progp->pg_init_request(rqstp, progp, &process)) {
	...
	procp = rqstp->rq_procinfo;
	...
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	if (serv->sv_stats)
		serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);
	...
	/*
	 * un-reserve some of the out-queue now that we have a
	 * better idea of reply space requirements.
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);
	...
	if (procp->pc_release)
		procp->pc_release(rqstp);
	...
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;
	if (*rqstp->rq_accept_statp != rpc_success)
		goto sendit;
	if (procp->pc_encode == NULL)
		goto dropit;
	...
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_xprt_close(rqstp->rq_xprt);
	...
err_short_len:
	svc_printk(rqstp, "short len %u, dropping request\n",
		   rqstp->rq_arg.len);
	...
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	...
err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	if (serv->sv_stats)
		serv->sv_stats->rpcbadauth++;
	...
	xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
	...
err_bad_prog:
	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_unavail;
	...
err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_prog_mismatch;
	...
err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_proc_unavail;
	...
err_garbage_args:
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_garbage_args;
	...
err_system_err:
	if (serv->sv_stats)
		serv->sv_stats->rpcbadfmt++;
	*rqstp->rq_accept_statp = rpc_system_err;
/**
 * svc_process - Execute one RPC transaction
 */
	struct kvec *resv = &rqstp->rq_res.head[0];
	...
	svc_xprt_deferred_close(rqstp->rq_xprt);
	...
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_next_page;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	...
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
	...
	rqstp->rq_xid = *p++;
	...
	if (rqstp->rq_server->sv_stats)
		rqstp->rq_server->sv_stats->rpcbadfmt++;
/**
 * svc_process_bc - process a reverse-direction RPC request
 * @req: RPC request to be used for client-side processing
 * @rqstp: server-side execution context
 */
	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
		   rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
					 rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
				    rqstp->rq_arg.page_len;

	/* Reset the response buffer */
	rqstp->rq_res.head[0].iov_len = 0;
	...
	if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
		return;
	...
	atomic_dec(&req->rq_xprt->bc_slot_count);
	...
	if (rqstp->bc_to_initval > 0) {
		timeout.to_initval = rqstp->bc_to_initval;
		timeout.to_retries = rqstp->bc_to_retries;
	} else {
		timeout.to_initval = req->rq_xprt->timeout->to_initval;
		timeout.to_retries = req->rq_xprt->timeout->to_retries;
	}
	...
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	...
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
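/*
 * Editor's sketch (not part of svc.c): clamping a two-segment buffer
 * (head kvec plus page area) to a received length, covering the same
 * three cases as the argument-length adjustment above: fits in head,
 * spills into pages, or exceeds both. Names are hypothetical.
 */
#include <stddef.h>

struct twoseg_sketch {
	size_t len;		/* total valid bytes */
	size_t head_len;	/* bytes in the head segment */
	size_t page_len;	/* bytes in the page segment */
};

static void clamp_twoseg_sketch(struct twoseg_sketch *b, size_t rcvd)
{
	b->len = rcvd;
	if (rcvd <= b->head_len) {
		b->head_len = rcvd;			/* all in head */
		b->page_len = 0;
	} else if (rcvd <= b->head_len + b->page_len) {
		b->page_len = rcvd - b->head_len;	/* spills into pages */
	} else {
		b->len = b->head_len + b->page_len;	/* cannot exceed capacity */
	}
}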
/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 */
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;

/**
 * svc_proc_name - Return RPC procedure name in string form
 * ...
 *   Pointer to a NUL-terminated string
 */
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * ...
 * @offset: payload's byte offset in rqstp->rq_res
 * ...
 */
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 */
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;
	...
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}
	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
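/*
 * Editor's sketch (not part of svc.c): building an iovec array from an
 * initial header buffer plus full pages, clamping each segment to the
 * bytes remaining -- the same walk as svc_fill_write_vector(). Names
 * and the page-size constant are hypothetical.
 */
#include <stddef.h>
#include <sys/uio.h>

#define PAGE_SZ_SKETCH 4096

static unsigned int fill_vec_sketch(struct iovec *vec,
				    void *head, size_t head_len,
				    void **pages, size_t total)
{
	unsigned int i = 0;

	if (head_len) {
		vec[i].iov_base = head;
		vec[i].iov_len = total < head_len ? total : head_len;
		total -= vec[i].iov_len;
		++i;
	}
	while (total) {
		vec[i].iov_base = *pages++;	/* one full or partial page */
		vec[i].iov_len = total < PAGE_SZ_SKETCH ? total : PAGE_SZ_SKETCH;
		total -= vec[i].iov_len;
		++i;
	}
	return i;	/* number of segments filled */
}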
/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * ...
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
	...
		return ERR_PTR(-ESERVERFAULT);
	...
	len = min_t(size_t, total, first->iov_len);
	...
	memcpy(dst, first->iov_base, len);
	...
	remaining -= len;
	...
	return ERR_PTR(-EINVAL);