Lines matching references to rqstp in net/sunrpc/svc.c

639 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
644 if (svc_is_backchannel(rqstp))
655 rqstp->rq_pages);
663 svc_release_buffer(struct svc_rqst *rqstp)
667 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
668 if (rqstp->rq_pages[i])
669 put_page(rqstp->rq_pages[i]);
673 svc_rqst_free(struct svc_rqst *rqstp)
675 folio_batch_release(&rqstp->rq_fbatch);
676 svc_release_buffer(rqstp);
677 if (rqstp->rq_scratch_page)
678 put_page(rqstp->rq_scratch_page);
679 kfree(rqstp->rq_resp);
680 kfree(rqstp->rq_argp);
681 kfree(rqstp->rq_auth_data);
682 kfree_rcu(rqstp, rq_rcu_head);
688 struct svc_rqst *rqstp;
690 rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
691 if (!rqstp)
692 return rqstp;
694 folio_batch_init(&rqstp->rq_fbatch);
696 rqstp->rq_server = serv;
697 rqstp->rq_pool = pool;
699 rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
700 if (!rqstp->rq_scratch_page)
703 rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
704 if (!rqstp->rq_argp)
707 rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
708 if (!rqstp->rq_resp)
711 if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
714 rqstp->rq_err = -EAGAIN; /* No error yet */
722 list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
724 return rqstp;
727 svc_rqst_free(rqstp);
742 struct svc_rqst *rqstp;
748 rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
749 WRITE_ONCE(rqstp->rq_qtime, ktime_get());
750 if (!task_is_running(rqstp->rq_task)) {
751 wake_up_process(rqstp->rq_task);
752 trace_svc_wake_up(rqstp->rq_task->pid);
797 struct svc_rqst *rqstp;
809 rqstp = svc_prepare_thread(serv, chosen_pool, node);
810 if (!rqstp)
812 task = kthread_create_on_node(serv->sv_threadfn, rqstp,
815 svc_exit_thread(rqstp);
819 rqstp->rq_task = task;
826 wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
827 err = rqstp->rq_err;
829 svc_exit_thread(rqstp);
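The sv_threadfn handed to kthread_create_on_node() above is supplied by the service (nfsd, lockd, the NFSv4 callback server). A minimal sketch of such a thread function, assuming the svc_thread_init_status()/svc_thread_should_stop() helpers from include/linux/sunrpc/svc.h; the example_svc_thread name is illustrative:

	#include <linux/freezer.h>
	#include <linux/sunrpc/svc.h>

	/* One instance runs per server thread; "data" is the svc_rqst that
	 * svc_prepare_thread() allocated and kthread_create_on_node() passed in. */
	static int example_svc_thread(void *data)
	{
		struct svc_rqst *rqstp = data;

		/* Clear the -EAGAIN sentinel in rq_err so the starter's
		 * wait_var_event() at line 826 can proceed. */
		svc_thread_init_status(rqstp, 0);
		set_freezable();

		while (!svc_thread_should_stop(rqstp))
			svc_recv(rqstp);	/* wait for work, dispatch via svc_process() */

		svc_exit_thread(rqstp);
		return 0;
	}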
890 * @rqstp: svc_rqst with pages to replace
900 bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
902 struct page **begin = rqstp->rq_pages;
903 struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];
905 if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
906 trace_svc_replace_page_err(rqstp);
910 if (*rqstp->rq_next_page) {
911 if (!folio_batch_add(&rqstp->rq_fbatch,
912 page_folio(*rqstp->rq_next_page)))
913 __folio_batch_release(&rqstp->rq_fbatch);
917 *(rqstp->rq_next_page++) = page;
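A caller such as a splice-read actor can use svc_rqst_replace_page() to swap file pages directly into the reply. A minimal sketch under that assumption; the splice_read_pages() helper and its error handling are illustrative, not part of this file:

	#include <linux/sunrpc/svc.h>

	/* Replace the next @n reply slots in rq_pages with the caller's pages.
	 * svc_rqst_replace_page() queues each displaced page on rq_fbatch so it
	 * can be released in a batch later. */
	static int splice_read_pages(struct svc_rqst *rqstp,
				     struct page **pages, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (!svc_rqst_replace_page(rqstp, pages[i]))
				return -EIO;	/* rq_next_page was out of range */
		return 0;
	}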
924 * @rqstp: RPC transaction context
929 void svc_rqst_release_pages(struct svc_rqst *rqstp)
931 int i, count = rqstp->rq_next_page - rqstp->rq_respages;
934 release_pages(rqstp->rq_respages, count);
936 rqstp->rq_respages[i] = NULL;
942 * @rqstp: the svc_rqst which represents the thread.
955 svc_exit_thread(struct svc_rqst *rqstp)
957 struct svc_serv *serv = rqstp->rq_server;
958 struct svc_pool *pool = rqstp->rq_pool;
960 list_del_rcu(&rqstp->rq_all);
966 svc_rqst_free(rqstp);
1247 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1258 dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1263 static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
1267 svc_generic_init_request(struct svc_rqst *rqstp,
1274 if (rqstp->rq_vers >= progp->pg_nvers)
1276 versp = progp->pg_vers[rqstp->rq_vers];
1291 if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
1292 !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
1295 if (rqstp->rq_proc >= versp->vs_nproc)
1297 rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
1300 memset(rqstp->rq_argp, 0, procp->pc_argzero);
1301 memset(rqstp->rq_resp, 0, procp->pc_ressize);
1304 this_cpu_inc(versp->vs_count[rqstp->rq_proc]);
1321 svc_process_common(struct svc_rqst *rqstp)
1323 struct xdr_stream *xdr = &rqstp->rq_res_stream;
1326 struct svc_serv *serv = rqstp->rq_server;
1334 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
1335 clear_bit(RQ_DROPME, &rqstp->rq_flags);
1338 svcxdr_init_encode(rqstp);
1339 xdr_stream_encode_be32(xdr, rqstp->rq_xid);
1342 p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
1350 rqstp->rq_prog = be32_to_cpup(p++);
1351 rqstp->rq_vers = be32_to_cpup(p++);
1352 rqstp->rq_proc = be32_to_cpup(p);
1355 if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog)
1363 auth_res = svc_authenticate(rqstp);
1366 auth_res = progp->pg_authenticate(rqstp);
1367 trace_svc_authenticate(rqstp, auth_res);
1391 switch (progp->pg_init_request(rqstp, progp, &process)) {
1402 procp = rqstp->rq_procinfo;
1410 trace_svc_process(rqstp, progp->pg_name);
1418 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1421 rc = process.dispatch(rqstp);
1423 procp->pc_release(rqstp);
1428 if (rqstp->rq_auth_stat != rpc_auth_ok)
1431 if (*rqstp->rq_accept_statp != rpc_success)
1438 if (svc_authorise(rqstp))
1443 svc_authorise(rqstp); /* doesn't hurt to call this twice */
1448 svc_authorise(rqstp);
1450 if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1451 svc_xprt_close(rqstp->rq_xprt);
1456 svc_printk(rqstp, "short len %u, dropping request\n",
1457 rqstp->rq_arg.len);
1472 be32_to_cpu(rqstp->rq_auth_stat));
1479 xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
1483 dprintk("svc: unknown program %d\n", rqstp->rq_prog);
1486 *rqstp->rq_accept_statp = rpc_prog_unavail;
1490 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1491 rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
1495 *rqstp->rq_accept_statp = rpc_prog_mismatch;
1506 svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
1510 *rqstp->rq_accept_statp = rpc_proc_unavail;
1514 svc_printk(rqstp, "failed to decode RPC header\n");
1518 *rqstp->rq_accept_statp = rpc_garbage_args;
1524 *rqstp->rq_accept_statp = rpc_system_err;
1531 static void svc_drop(struct svc_rqst *rqstp)
1533 trace_svc_drop(rqstp);
1538 * @rqstp: RPC transaction context
1541 void svc_process(struct svc_rqst *rqstp)
1543 struct kvec *resv = &rqstp->rq_res.head[0];
1549 svc_xprt_deferred_close(rqstp->rq_xprt);
1556 rqstp->rq_next_page = &rqstp->rq_respages[1];
1557 resv->iov_base = page_address(rqstp->rq_respages[0]);
1559 rqstp->rq_res.pages = rqstp->rq_next_page;
1560 rqstp->rq_res.len = 0;
1561 rqstp->rq_res.page_base = 0;
1562 rqstp->rq_res.page_len = 0;
1563 rqstp->rq_res.buflen = PAGE_SIZE;
1564 rqstp->rq_res.tail[0].iov_base = NULL;
1565 rqstp->rq_res.tail[0].iov_len = 0;
1567 svcxdr_init_decode(rqstp);
1568 p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
1571 rqstp->rq_xid = *p++;
1575 if (!svc_process_common(rqstp))
1577 svc_send(rqstp);
1581 svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
1583 if (rqstp->rq_server->sv_stats)
1584 rqstp->rq_server->sv_stats->rpcbadfmt++;
1586 svc_drop(rqstp);
1593 * @rqstp: server-side execution context
1596 void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
1605 rqstp->rq_xid = req->rq_xid;
1606 rqstp->rq_prot = req->rq_xprt->prot;
1607 rqstp->rq_bc_net = req->rq_xprt->xprt_net;
1609 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1610 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1611 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1612 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1615 rqstp->rq_arg.len = req->rq_private_buf.len;
1616 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1617 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1618 rqstp->rq_arg.page_len = 0;
1619 } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1620 rqstp->rq_arg.page_len)
1621 rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1622 rqstp->rq_arg.head[0].iov_len;
1624 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1625 rqstp->rq_arg.page_len;
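To make the length fix-ups above concrete: with, say, a 4096-byte head kvec, a received length of 1000 takes the first branch (head[0].iov_len becomes 1000, page_len becomes 0); a received length of 10000, assuming the existing page_len can hold it, takes the second branch and sets page_len to 10000 - 4096 = 5904; anything larger than head plus the existing page_len is clamped back down to that sum.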
1628 rqstp->rq_res.head[0].iov_len = 0;
1634 svcxdr_init_decode(rqstp);
1635 if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
1639 proc_error = svc_process_common(rqstp);
1648 if (rqstp->bc_to_initval > 0) {
1649 timeout.to_initval = rqstp->bc_to_initval;
1650 timeout.to_retries = rqstp->bc_to_retries;
1656 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
1669 * @rqstp: RPC transaction context
1674 u32 svc_max_payload(const struct svc_rqst *rqstp)
1676 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1678 if (rqstp->rq_server->sv_max_payload < max)
1679 max = rqstp->rq_server->sv_max_payload;
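Services typically use svc_max_payload() to clamp client-requested transfer sizes. A trivial sketch; the helper name is illustrative:

	#include <linux/minmax.h>
	#include <linux/sunrpc/svc.h>

	/* Bound a READ/WRITE count by what the transport class and the
	 * server instance both accept. */
	static u32 example_clamp_count(const struct svc_rqst *rqstp, u32 count)
	{
		return min(count, svc_max_payload(rqstp));
	}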
1686 * @rqstp: svc_rqst to operate on
1691 const char *svc_proc_name(const struct svc_rqst *rqstp)
1693 if (rqstp && rqstp->rq_procinfo)
1694 return rqstp->rq_procinfo->pc_name;
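svc_proc_name() is mainly a convenience for tracepoints and log messages, for example (the message is illustrative):

	#include <linux/printk.h>
	#include <linux/sunrpc/svc.h>

	/* Log the symbolic name of the procedure being dispatched. */
	static void example_log_proc(const struct svc_rqst *rqstp)
	{
		pr_debug("svc: dispatching %s\n", svc_proc_name(rqstp));
	}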
1701 * @rqstp: svc_rqst to operate on
1702 * @offset: payload's byte offset in rqstp->rq_res
1708 int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
1711 return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
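An encoder that has just placed an opaque result payload (a READ result, for instance) into rq_res can mark it for the transport so the data can be sent without re-copying (e.g. via RDMA Write). A sketch; the wrapper and its parameters are illustrative:

	#include <linux/sunrpc/svc.h>

	/* Record that @len bytes of result data start @offset bytes into
	 * rq_res; the transport's xpo_result_payload method does the work. */
	static int example_mark_read_payload(struct svc_rqst *rqstp,
					     unsigned int offset,
					     unsigned int len)
	{
		return svc_encode_result_payload(rqstp, offset, len);
	}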
1718 * @rqstp: svc_rqst to operate on
1721 * Fills in rqstp::rq_vec, and returns the number of elements.
1723 unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
1728 struct kvec *vec = rqstp->rq_vec;
1751 WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
1758 * @rqstp: svc_rqst to operate on
1767 char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,