Lines matching refs:task

53 static void call_start(struct rpc_task *task);
54 static void call_reserve(struct rpc_task *task);
55 static void call_reserveresult(struct rpc_task *task);
56 static void call_allocate(struct rpc_task *task);
57 static void call_encode(struct rpc_task *task);
58 static void call_decode(struct rpc_task *task);
59 static void call_bind(struct rpc_task *task);
60 static void call_bind_status(struct rpc_task *task);
61 static void call_transmit(struct rpc_task *task);
62 static void call_status(struct rpc_task *task);
63 static void call_transmit_status(struct rpc_task *task);
64 static void call_refresh(struct rpc_task *task);
65 static void call_refreshresult(struct rpc_task *task);
66 static void call_connect(struct rpc_task *task);
67 static void call_connect_status(struct rpc_task *task);
69 static int rpc_encode_header(struct rpc_task *task,
71 static int rpc_decode_header(struct rpc_task *task,
75 static void rpc_check_timeout(struct rpc_task *task);
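The declarations above are the steps of the SUNRPC client call state machine (this listing appears to come from net/sunrpc/clnt.c): each call_* handler does one unit of work, points task->tk_action at the next step, and returns to the scheduler, which keeps invoking tk_action until the task completes. Below is a minimal, user-space sketch of that chaining pattern; it is illustrative only, and every name in it (fake_task, step_start, ...) is made up rather than kernel code.

#include <stdio.h>

/* Toy stand-in for struct rpc_task: just the pieces the pattern needs. */
struct fake_task {
	void (*tk_action)(struct fake_task *task);	/* next step to run */
	int tk_status;
};

static void step_decode(struct fake_task *task)
{
	printf("decode, status=%d\n", task->tk_status);
	task->tk_action = NULL;			/* state machine finished */
}

static void step_transmit(struct fake_task *task)
{
	printf("transmit\n");
	task->tk_action = step_decode;		/* chain to the next step */
}

static void step_start(struct fake_task *task)
{
	printf("start\n");
	task->tk_action = step_transmit;
}

int main(void)
{
	struct fake_task task = { .tk_action = step_start };

	/* The loop rpc_execute() plays in the kernel: run steps until done. */
	while (task.tk_action)
		task.tk_action(&task);
	return 0;
}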
903 * @error: RPC task error value to set
915 struct rpc_task *task;
924 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
925 if (!RPC_IS_ACTIVATED(task))
927 if (!fnmatch(task, data))
929 rpc_task_try_cancel(task, error);
1122 void rpc_task_release_transport(struct rpc_task *task)
1124 struct rpc_xprt *xprt = task->tk_xprt;
1127 task->tk_xprt = NULL;
1128 if (task->tk_client)
1129 rpc_task_release_xprt(task->tk_client, xprt);
1136 void rpc_task_release_client(struct rpc_task *task)
1138 struct rpc_clnt *clnt = task->tk_client;
1140 rpc_task_release_transport(task);
1142 /* Remove from client task list */
1144 list_del(&task->tk_task);
1146 task->tk_client = NULL;
1171 void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
1173 if (task->tk_xprt) {
1174 if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
1175 (task->tk_flags & RPC_TASK_MOVEABLE)))
1177 xprt_release(task);
1178 xprt_put(task->tk_xprt);
1180 if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
1181 task->tk_xprt = rpc_task_get_first_xprt(clnt);
1183 task->tk_xprt = rpc_task_get_next_xprt(clnt);
1187 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
1189 rpc_task_set_transport(task, clnt);
1190 task->tk_client = clnt;
1193 task->tk_flags |= RPC_TASK_SOFT;
1195 task->tk_flags |= RPC_TASK_TIMEOUT;
1197 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
1202 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
1205 task->tk_msg.rpc_proc = msg->rpc_proc;
1206 task->tk_msg.rpc_argp = msg->rpc_argp;
1207 task->tk_msg.rpc_resp = msg->rpc_resp;
1208 task->tk_msg.rpc_cred = msg->rpc_cred;
1209 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1210 get_cred(task->tk_msg.rpc_cred);
1218 rpc_default_callback(struct rpc_task *task, void *data)
1227 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
1228 * @task_setup_data: pointer to task initialisation data
1232 struct rpc_task *task;
1234 task = rpc_new_task(task_setup_data);
1235 if (IS_ERR(task))
1236 return task;
1238 if (!RPC_IS_ASYNC(task))
1239 task->tk_flags |= RPC_TASK_CRED_NOREF;
1241 rpc_task_set_client(task, task_setup_data->rpc_client);
1242 rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
1244 if (task->tk_action == NULL)
1245 rpc_call_start(task);
1247 atomic_inc(&task->tk_count);
1248 rpc_execute(task);
1249 return task;
1261 struct rpc_task *task;
1277 task = rpc_run_task(&task_setup_data);
1278 if (IS_ERR(task))
1279 return PTR_ERR(task);
1280 status = task->tk_status;
1281 rpc_put_task(task);
1298 struct rpc_task *task;
1307 task = rpc_run_task(&task_setup_data);
1308 if (IS_ERR(task))
1309 return PTR_ERR(task);
1310 rpc_put_task(task);
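The two fragments above, which look like the bodies of rpc_call_sync() and rpc_call_async(), both funnel through rpc_run_task(): build a struct rpc_task_setup, run the task, then either read tk_status or simply drop the reference. Below is a hedged sketch of driving the asynchronous path through rpc_call_async(); my_ops, my_done, my_release and my_submit are hypothetical names, and the procedure entry is whatever the caller's protocol defines.

#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void my_done(struct rpc_task *task, void *calldata)
{
	/* Runs in rpciod context once the reply has been processed. */
	pr_info("call finished, tk_status=%d\n", task->tk_status);
}

static void my_release(void *calldata)
{
	kfree(calldata);	/* drop whatever was passed as callback data */
}

static const struct rpc_call_ops my_ops = {
	.rpc_call_done	= my_done,
	.rpc_release	= my_release,
};

static int my_submit(struct rpc_clnt *clnt, const struct rpc_procinfo *proc,
		     void *argp, void *resp, void *calldata)
{
	struct rpc_message msg = {
		.rpc_proc	= proc,		/* caller-supplied procedure entry */
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};

	/* rpc_call_async() adds RPC_TASK_ASYNC to the flags for us. */
	return rpc_call_async(clnt, &msg, 0, &my_ops, calldata);
}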
1316 static void call_bc_encode(struct rpc_task *task);
1319 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
1322 * @timeout: timeout values to use for this task
1327 struct rpc_task *task;
1338 task = rpc_new_task(&task_setup_data);
1339 if (IS_ERR(task)) {
1341 return task;
1344 xprt_init_bc_request(req, task, timeout);
1346 task->tk_action = call_bc_encode;
1347 atomic_inc(&task->tk_count);
1348 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
1349 rpc_execute(task);
1351 dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
1352 return task;
1377 rpc_call_start(struct rpc_task *task)
1379 task->tk_action = call_start;
1677 __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
1679 task->tk_status = 0;
1680 task->tk_rpc_status = 0;
1681 task->tk_action = action;
1690 rpc_restart_call(struct rpc_task *task)
1692 return __rpc_restart_call(task, call_start);
1701 rpc_restart_call_prepare(struct rpc_task *task)
1703 if (task->tk_ops->rpc_call_prepare != NULL)
1704 return __rpc_restart_call(task, rpc_prepare_task);
1705 return rpc_restart_call(task);
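rpc_restart_call() and rpc_restart_call_prepare() reset tk_status and point tk_action back at the start of the state machine, which is what lets an rpc_call_done callback retry a request instead of letting the task finish. A hedged sketch of that retry-from-callback pattern follows; my_call_done is a hypothetical callback and the -EAGAIN condition is purely illustrative.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void my_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EAGAIN) {
		rpc_delay(task, HZ >> 2);		/* back off briefly */
		rpc_restart_call_prepare(task);		/* rerun from rpc_prepare_task/call_start */
		return;
	}
	/* Otherwise treat task->tk_status as the final result. */
}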
1710 *rpc_proc_name(const struct rpc_task *task)
1712 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1724 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
1726 trace_rpc_call_rpcerror(task, tk_status, rpc_status);
1727 rpc_task_set_rpc_status(task, rpc_status);
1728 rpc_exit(task, tk_status);
1732 rpc_call_rpcerror(struct rpc_task *task, int status)
1734 __rpc_call_rpcerror(task, status, status);
1744 call_start(struct rpc_task *task)
1746 struct rpc_clnt *clnt = task->tk_client;
1747 int idx = task->tk_msg.rpc_proc->p_statidx;
1749 trace_rpc_request(task);
1751 if (task->tk_client->cl_shutdown) {
1752 rpc_call_rpcerror(task, -EIO);
1760 task->tk_action = call_reserve;
1761 rpc_task_set_transport(task, clnt);
1768 call_reserve(struct rpc_task *task)
1770 task->tk_status = 0;
1771 task->tk_action = call_reserveresult;
1772 xprt_reserve(task);
1775 static void call_retry_reserve(struct rpc_task *task);
1781 call_reserveresult(struct rpc_task *task)
1783 int status = task->tk_status;
1789 task->tk_status = 0;
1791 if (task->tk_rqstp) {
1792 task->tk_action = call_refresh;
1795 spin_lock(&task->tk_client->cl_lock);
1796 if (list_empty(&task->tk_task))
1797 list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
1798 spin_unlock(&task->tk_client->cl_lock);
1801 rpc_call_rpcerror(task, -EIO);
1807 rpc_delay(task, HZ >> 2);
1810 task->tk_action = call_retry_reserve;
1813 rpc_call_rpcerror(task, status);
1821 call_retry_reserve(struct rpc_task *task)
1823 task->tk_status = 0;
1824 task->tk_action = call_reserveresult;
1825 xprt_retry_reserve(task);
1832 call_refresh(struct rpc_task *task)
1834 task->tk_action = call_refreshresult;
1835 task->tk_status = 0;
1836 task->tk_client->cl_stats->rpcauthrefresh++;
1837 rpcauth_refreshcred(task);
1844 call_refreshresult(struct rpc_task *task)
1846 int status = task->tk_status;
1848 task->tk_status = 0;
1849 task->tk_action = call_refresh;
1852 if (rpcauth_uptodatecred(task)) {
1853 task->tk_action = call_allocate;
1861 rpc_delay(task, 3*HZ);
1865 if (!task->tk_cred_retry)
1867 task->tk_cred_retry--;
1868 trace_rpc_retry_refresh_status(task);
1873 rpc_delay(task, HZ >> 4);
1876 trace_rpc_refresh_status(task);
1877 rpc_call_rpcerror(task, status);
1885 call_allocate(struct rpc_task *task)
1887 const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
1888 struct rpc_rqst *req = task->tk_rqstp;
1890 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1893 task->tk_status = 0;
1894 task->tk_action = call_encode;
1915 status = xprt->ops->buf_alloc(task);
1916 trace_rpc_buf_alloc(task, status);
1920 rpc_call_rpcerror(task, status);
1924 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1925 task->tk_action = call_allocate;
1926 rpc_delay(task, HZ>>4);
1930 rpc_call_rpcerror(task, -ERESTARTSYS);
1934 rpc_task_need_encode(struct rpc_task *task)
1936 return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
1937 (!(task->tk_flags & RPC_TASK_SENT) ||
1938 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
1939 xprt_request_need_retransmit(task));
1943 rpc_xdr_encode(struct rpc_task *task)
1945 struct rpc_rqst *req = task->tk_rqstp;
1959 if (rpc_encode_header(task, &xdr))
1962 task->tk_status = rpcauth_wrap_req(task, &xdr);
1969 call_encode(struct rpc_task *task)
1971 if (!rpc_task_need_encode(task))
1974 /* Dequeue task from the receive queue while we're encoding */
1975 xprt_request_dequeue_xprt(task);
1977 rpc_xdr_encode(task);
1978 /* Add task to reply queue before transmission to avoid races */
1979 if (task->tk_status == 0 && rpc_reply_expected(task))
1980 task->tk_status = xprt_request_enqueue_receive(task);
1982 if (task->tk_status != 0) {
1984 switch (task->tk_status) {
1987 rpc_delay(task, HZ >> 4);
1990 if (!task->tk_cred_retry) {
1991 rpc_call_rpcerror(task, task->tk_status);
1993 task->tk_action = call_refresh;
1994 task->tk_cred_retry--;
1995 trace_rpc_retry_refresh_status(task);
1999 rpc_call_rpcerror(task, task->tk_status);
2004 xprt_request_enqueue_transmit(task);
2006 task->tk_action = call_transmit;
2008 if (!xprt_bound(task->tk_xprt))
2009 task->tk_action = call_bind;
2010 else if (!xprt_connected(task->tk_xprt))
2011 task->tk_action = call_connect;
2015 * Helpers to check if the task was already transmitted, and
2019 rpc_task_transmitted(struct rpc_task *task)
2021 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
2025 rpc_task_handle_transmitted(struct rpc_task *task)
2027 xprt_end_transmit(task);
2028 task->tk_action = call_transmit_status;
2035 call_bind(struct rpc_task *task)
2037 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2039 if (rpc_task_transmitted(task)) {
2040 rpc_task_handle_transmitted(task);
2045 task->tk_action = call_connect;
2049 task->tk_action = call_bind_status;
2050 if (!xprt_prepare_transmit(task))
2053 xprt->ops->rpcbind(task);
2060 call_bind_status(struct rpc_task *task)
2062 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2065 if (rpc_task_transmitted(task)) {
2066 rpc_task_handle_transmitted(task);
2070 if (task->tk_status >= 0)
2073 task->tk_status = 0;
2077 switch (task->tk_status) {
2079 rpc_delay(task, HZ >> 2);
2082 trace_rpcb_prog_unavail_err(task);
2084 if (task->tk_msg.rpc_proc->p_proc == 0) {
2088 rpc_delay(task, 3*HZ);
2091 rpc_delay(task, HZ >> 2);
2096 trace_rpcb_timeout_err(task);
2100 trace_rpcb_bind_version_err(task);
2103 trace_rpcb_bind_version_err(task);
2114 trace_rpcb_unreachable_err(task);
2115 if (!RPC_IS_SOFTCONN(task)) {
2116 rpc_delay(task, 5*HZ);
2119 status = task->tk_status;
2122 trace_rpcb_unrecognized_err(task);
2125 rpc_call_rpcerror(task, status);
2128 task->tk_action = call_connect;
2131 task->tk_status = 0;
2132 task->tk_action = call_bind;
2133 rpc_check_timeout(task);
2140 call_connect(struct rpc_task *task)
2142 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2144 if (rpc_task_transmitted(task)) {
2145 rpc_task_handle_transmitted(task);
2150 task->tk_action = call_transmit;
2154 task->tk_action = call_connect_status;
2155 if (task->tk_status < 0)
2157 if (task->tk_flags & RPC_TASK_NOCONNECT) {
2158 rpc_call_rpcerror(task, -ENOTCONN);
2161 if (!xprt_prepare_transmit(task))
2163 xprt_connect(task);
2170 call_connect_status(struct rpc_task *task)
2172 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2173 struct rpc_clnt *clnt = task->tk_client;
2174 int status = task->tk_status;
2176 if (rpc_task_transmitted(task)) {
2177 rpc_task_handle_transmitted(task);
2181 trace_rpc_connect_status(task);
2183 if (task->tk_status == 0) {
2188 task->tk_status = 0;
2192 task->tk_status = 0;
2197 if (RPC_IS_SOFTCONN(task))
2210 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
2211 task->tk_rqstp->rq_connect_cookie);
2212 if (RPC_IS_SOFTCONN(task))
2215 rpc_delay(task, 3*HZ);
2221 if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
2222 (task->tk_flags & RPC_TASK_MOVEABLE) &&
2224 struct rpc_xprt *saved = task->tk_xprt;
2231 xprt_release(task);
2237 task->tk_xprt = NULL;
2238 task->tk_action = call_start;
2241 if (!task->tk_xprt)
2246 rpc_delay(task, HZ >> 2);
2249 rpc_call_rpcerror(task, status);
2252 task->tk_action = call_transmit;
2256 task->tk_action = call_bind;
2258 rpc_check_timeout(task);
2265 call_transmit(struct rpc_task *task)
2267 if (rpc_task_transmitted(task)) {
2268 rpc_task_handle_transmitted(task);
2272 task->tk_action = call_transmit_status;
2273 if (!xprt_prepare_transmit(task))
2275 task->tk_status = 0;
2276 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
2277 if (!xprt_connected(task->tk_xprt)) {
2278 task->tk_status = -ENOTCONN;
2281 xprt_transmit(task);
2283 xprt_end_transmit(task);
2290 call_transmit_status(struct rpc_task *task)
2292 task->tk_action = call_status;
2298 if (rpc_task_transmitted(task)) {
2299 task->tk_status = 0;
2300 xprt_request_wait_receive(task);
2304 switch (task->tk_status) {
2308 task->tk_status = 0;
2309 task->tk_action = call_encode;
2319 rpc_delay(task, HZ>>2);
2323 task->tk_action = call_transmit;
2324 task->tk_status = 0;
2333 if (RPC_IS_SOFTCONN(task)) {
2334 if (!task->tk_msg.rpc_proc->p_proc)
2335 trace_xprt_ping(task->tk_xprt,
2336 task->tk_status);
2337 rpc_call_rpcerror(task, task->tk_status);
2346 task->tk_action = call_bind;
2347 task->tk_status = 0;
2350 rpc_check_timeout(task);
2354 static void call_bc_transmit(struct rpc_task *task);
2355 static void call_bc_transmit_status(struct rpc_task *task);
2358 call_bc_encode(struct rpc_task *task)
2360 xprt_request_enqueue_transmit(task);
2361 task->tk_action = call_bc_transmit;
2369 call_bc_transmit(struct rpc_task *task)
2371 task->tk_action = call_bc_transmit_status;
2372 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
2373 if (!xprt_prepare_transmit(task))
2375 task->tk_status = 0;
2376 xprt_transmit(task);
2378 xprt_end_transmit(task);
2382 call_bc_transmit_status(struct rpc_task *task)
2384 struct rpc_rqst *req = task->tk_rqstp;
2386 if (rpc_task_transmitted(task))
2387 task->tk_status = 0;
2389 switch (task->tk_status) {
2404 rpc_delay(task, HZ>>2);
2408 task->tk_status = 0;
2409 task->tk_action = call_bc_transmit;
2420 "error: %d\n", task->tk_status);
2430 "error: %d\n", task->tk_status);
2433 task->tk_action = rpc_exit_task;
2441 call_status(struct rpc_task *task)
2443 struct rpc_clnt *clnt = task->tk_client;
2446 if (!task->tk_msg.rpc_proc->p_proc)
2447 trace_xprt_ping(task->tk_xprt, task->tk_status);
2449 status = task->tk_status;
2451 task->tk_action = call_decode;
2455 trace_rpc_call_status(task);
2456 task->tk_status = 0;
2463 if (RPC_IS_SOFTCONN(task))
2469 rpc_delay(task, 3*HZ);
2480 rpc_delay(task, 3*HZ);
2488 rpc_delay(task, HZ>>2);
2499 task->tk_action = call_encode;
2500 rpc_check_timeout(task);
2503 rpc_call_rpcerror(task, status);
2516 rpc_check_timeout(struct rpc_task *task)
2518 struct rpc_clnt *clnt = task->tk_client;
2520 if (RPC_SIGNALLED(task))
2523 if (xprt_adjust_timeout(task->tk_rqstp) == 0)
2526 trace_rpc_timeout_status(task);
2527 task->tk_timeouts++;
2529 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
2530 rpc_call_rpcerror(task, -ETIMEDOUT);
2534 if (RPC_IS_SOFT(task)) {
2540 if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
2541 rpc_check_connected(task->tk_rqstp))
2548 task->tk_xprt->servername);
2550 if (task->tk_flags & RPC_TASK_TIMEOUT)
2551 rpc_call_rpcerror(task, -ETIMEDOUT);
2553 __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
2557 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
2558 task->tk_flags |= RPC_CALL_MAJORSEEN;
2563 task->tk_xprt->servername);
2571 rpcauth_invalcred(task);
2578 call_decode(struct rpc_task *task)
2580 struct rpc_clnt *clnt = task->tk_client;
2581 struct rpc_rqst *req = task->tk_rqstp;
2585 if (!task->tk_msg.rpc_proc->p_decode) {
2586 task->tk_action = rpc_exit_task;
2590 if (task->tk_flags & RPC_CALL_MAJORSEEN) {
2594 task->tk_xprt->servername);
2596 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
2613 trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
2621 err = rpc_decode_header(task, &xdr);
2625 task->tk_action = rpc_exit_task;
2626 task->tk_status = rpcauth_unwrap_resp(task, &xdr);
2630 task->tk_status = 0;
2631 if (task->tk_client->cl_discrtry)
2634 task->tk_action = call_encode;
2635 rpc_check_timeout(task);
2638 task->tk_action = call_reserve;
2639 rpc_check_timeout(task);
2640 rpcauth_invalcred(task);
2642 xprt_release(task);
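call_decode() ends by handing the verified reply body to the procedure's p_decode routine (and call_encode/rpc_xdr_encode similarly call p_encode), both taken from the struct rpc_procinfo the caller supplied in rpc_message.rpc_proc. Below is a hedged sketch of what such an entry can look like; the FOO/GETVAL names and the one-word XDR layout are invented, and only the rpc_procinfo, kxdreproc_t and kxdrdproc_t shapes are taken from the real headers.

#include <linux/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>

struct foo_getval_res {
	u32 value;
};

/* Encode one 32-bit argument into the call body. */
static void foo_xdr_enc_getval(struct rpc_rqst *req, struct xdr_stream *xdr,
			       const void *data)
{
	const u32 *key = data;

	if (xdr_stream_encode_u32(xdr, *key) < 0)
		return;		/* buffer was sized from p_arglen, so this should not happen */
}

/* Decode one 32-bit result from the reply body; returns 0 or -EBADMSG. */
static int foo_xdr_dec_getval(struct rpc_rqst *req, struct xdr_stream *xdr,
			      void *data)
{
	struct foo_getval_res *res = data;

	return xdr_stream_decode_u32(xdr, &res->value);
}

static const struct rpc_procinfo foo_procedures[] = {
	[1] = {
		.p_proc	  = 1,			/* hypothetical FOOPROC_GETVAL */
		.p_encode = foo_xdr_enc_getval,
		.p_decode = foo_xdr_dec_getval,
		.p_arglen = 1,			/* sizes in 32-bit XDR words */
		.p_replen = 1,
		.p_statidx = 1,			/* slot in the per-program iostats array */
		.p_name	  = "GETVAL",
	},
};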
2647 rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
2649 struct rpc_clnt *clnt = task->tk_client;
2650 struct rpc_rqst *req = task->tk_rqstp;
2663 *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
2665 error = rpcauth_marshcred(task, xdr);
2670 trace_rpc_bad_callhdr(task);
2671 rpc_call_rpcerror(task, error);
2676 rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
2678 struct rpc_clnt *clnt = task->tk_client;
2687 if (task->tk_rqstp->rq_rcv_buf.len & 3)
2699 error = rpcauth_checkverf(task, xdr);
2701 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
2704 rpcauth_invalcred(task);
2705 if (!task->tk_cred_retry)
2707 task->tk_cred_retry--;
2708 trace_rpc__stale_creds(task);
2721 trace_rpc__prog_unavail(task);
2725 trace_rpc__prog_mismatch(task);
2729 trace_rpc__proc_unavail(task);
2734 trace_rpc__garbage_args(task);
2743 if (task->tk_garb_retry) {
2744 task->tk_garb_retry--;
2745 task->tk_action = call_encode;
2749 rpc_call_rpcerror(task, error);
2753 trace_rpc__unparsable(task);
2758 trace_rpc_bad_verifier(task);
2778 trace_rpc__mismatch(task);
2793 rpcauth_invalcred(task);
2794 if (!task->tk_cred_retry)
2796 task->tk_cred_retry--;
2797 trace_rpc__stale_creds(task);
2802 if (!task->tk_garb_retry)
2804 task->tk_garb_retry--;
2805 trace_rpc__bad_creds(task);
2806 task->tk_action = call_encode;
2809 trace_rpc__auth_tooweak(task);
2811 task->tk_xprt->servername);
2840 rpc_null_call_prepare(struct rpc_task *task, void *data)
2842 task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
2843 rpc_call_start(task);
2881 struct rpc_task *task;
2887 task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
2888 if (IS_ERR(task))
2889 return PTR_ERR(task);
2890 status = task->tk_status;
2891 rpc_put_task(task);
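The fragment above is the NULL-procedure ping: run a task that carries no procedure arguments and use tk_status as the liveness answer. Callers outside clnt.c can get the same effect through the exported rpc_call_null() helper; the sketch below is a hypothetical wrapper (my_ping) around it, mirroring the read-status-then-put-task pattern shown above.

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>

static int my_ping(struct rpc_clnt *clnt)
{
	struct rpc_task *task;
	int status;

	task = rpc_call_null(clnt, NULL, 0);	/* synchronous NULL call */
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;		/* 0 means the server answered */
	rpc_put_task(task);
	return status;
}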
2906 struct rpc_task *task;
2909 task = rpc_run_task(&task_setup_data);
2910 if (IS_ERR(task))
2911 return PTR_ERR(task);
2912 status = task->tk_status;
2913 rpc_put_task(task);
2922 static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
2926 if (task->tk_status == 0)
2957 struct rpc_task *task;
2981 task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
2983 if (IS_ERR(task))
2984 return PTR_ERR(task);
2987 rpc_put_task(task);
2997 struct rpc_task *task;
3001 task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
3002 if (IS_ERR(task))
3003 return PTR_ERR(task);
3005 status = task->tk_status;
3006 rpc_put_task(task);
3340 const struct rpc_task *task)
3344 if (RPC_IS_QUEUED(task))
3345 rpc_waitq = rpc_qname(task->tk_waitqueue);
3348 task->tk_pid, task->tk_flags, task->tk_status,
3349 clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
3350 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
3351 task->tk_action, rpc_waitq);
3357 struct rpc_task *task;
3364 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
3369 rpc_show_task(clnt, task);