xref: /linux/net/sunrpc/xprt.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /*
2  *  linux/net/sunrpc/xprt.c
3  *
4  *  This is a generic RPC call interface supporting congestion avoidance
5  *  and asynchronous calls.
6  *
7  *  The interface works like this:
8  *
9  *  -	When a process places a call, it allocates a request slot if
10  *	one is available. Otherwise, it sleeps on the backlog queue
11  *	(xprt_reserve).
12  *  -	Next, the caller puts together the RPC message, stuffs it into
13  *	the request struct, and calls xprt_transmit().
14  *  -	xprt_transmit sends the message and installs the caller on the
15  *	transport's wait list. At the same time, if a reply is expected,
16  *	it installs a timer that fires once the request's timeout has
17  *	expired.
18  *  -	When a packet arrives, the data_ready handler walks the list of
19  *	pending requests for that transport. If a matching XID is found, the
20  *	caller is woken up, and the timer removed.
21  *  -	When no reply arrives within the timeout interval, the timer is
22  *	fired by the kernel and runs xprt_timer(). It either adjusts the
23  *	timeout values (minor timeout) or wakes up the caller with a status
24  *	of -ETIMEDOUT.
25  *  -	When the caller receives a notification from RPC that a reply arrived,
26  *	it should release the RPC slot, and process the reply.
27  *	If the call timed out, it may choose to retry the operation by
28  *	adjusting the initial timeout value, and simply calling rpc_call
29  *	again.
30  *
31  *  Support for async RPC is done through a set of RPC-specific scheduling
32  *  primitives that `transparently' work for processes as well as async
33  *  tasks that rely on callbacks.
34  *
35  *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
36  *
37  *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
38  */
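
/*
 * Editor's note: a minimal, hypothetical sketch of the calling sequence
 * described above, as seen from an RPC client.  The procedure table and
 * the EXAMPLEPROC_NULL index are invented for illustration; only
 * rpc_call_sync() and struct rpc_message are real interfaces.  Slot
 * reservation, transmission, the retransmit timer and the wait for the
 * reply are all handled on the caller's behalf inside this one call.
 */
#if 0	/* illustrative sketch only, not part of this file */
static int example_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &example_procedures[EXAMPLEPROC_NULL],
		.rpc_argp = NULL,
		.rpc_resp = NULL,
	};

	/* Reserves a slot, transmits, then sleeps on xprt->pending until
	 * xprt_complete_rqst() or xprt_timer() wakes the task up. */
	return rpc_call_sync(clnt, &msg, 0);
}
#endif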
39 
40 #include <linux/module.h>
41 
42 #include <linux/types.h>
43 #include <linux/interrupt.h>
44 #include <linux/workqueue.h>
45 #include <linux/net.h>
46 #include <linux/ktime.h>
47 
48 #include <linux/sunrpc/clnt.h>
49 #include <linux/sunrpc/metrics.h>
50 #include <linux/sunrpc/bc_xprt.h>
51 
52 #include "sunrpc.h"
53 
54 /*
55  * Local variables
56  */
57 
58 #ifdef RPC_DEBUG
59 # define RPCDBG_FACILITY	RPCDBG_XPRT
60 #endif
61 
62 /*
63  * Local functions
64  */
65 static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
66 static void	xprt_connect_status(struct rpc_task *task);
67 static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
68 
69 static DEFINE_SPINLOCK(xprt_list_lock);
70 static LIST_HEAD(xprt_list);
71 
72 /*
73  * The transport code maintains an estimate of the maximum number of out-
74  * standing RPC requests, using a smoothed version of the congestion
75  * avoidance implemented in 44BSD. This is basically the Van Jacobson
76  * congestion algorithm: If a retransmit occurs, the congestion window is
77  * halved; otherwise, it is incremented by 1/cwnd when
78  *
79  *	-	a reply is received and
80  *	-	a full number of requests are outstanding and
81  *	-	the congestion window hasn't been updated recently.
82  */
83 #define RPC_CWNDSHIFT		(8U)
84 #define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
85 #define RPC_INITCWND		RPC_CWNDSCALE
86 #define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
87 
88 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
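
/*
 * Editor's note: a worked example of the fixed-point arithmetic above.
 * One request corresponds to RPC_CWNDSCALE = 256 congestion units, so a
 * window of cwnd = 512 allows two requests in flight.  On a reply that
 * meets the conditions above, xprt_adjust_cwnd() grows the window by
 * roughly 1/cwnd of a request:
 *
 *	cwnd += (256 * 256 + 512 / 2) / 512 = 128	(half a request)
 *
 * while a timeout halves it (512 -> 256), never letting it drop below a
 * single request (RPC_CWNDSCALE) or rise above RPC_MAXCWND(xprt).
 */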
89 
90 /**
91  * xprt_register_transport - register a transport implementation
92  * @transport: transport to register
93  *
94  * If a transport implementation is loaded as a kernel module, it can
95  * call this interface to make itself known to the RPC client.
96  *
97  * Returns:
98  * 0:		transport successfully registered
99  * -EEXIST:	transport already registered
100  * -EINVAL:	transport module being unloaded
101  */
102 int xprt_register_transport(struct xprt_class *transport)
103 {
104 	struct xprt_class *t;
105 	int result;
106 
107 	result = -EEXIST;
108 	spin_lock(&xprt_list_lock);
109 	list_for_each_entry(t, &xprt_list, list) {
110 		/* don't register the same transport class twice */
111 		if (t->ident == transport->ident)
112 			goto out;
113 	}
114 
115 	list_add_tail(&transport->list, &xprt_list);
116 	printk(KERN_INFO "RPC: Registered %s transport module.\n",
117 	       transport->name);
118 	result = 0;
119 
120 out:
121 	spin_unlock(&xprt_list_lock);
122 	return result;
123 }
124 EXPORT_SYMBOL_GPL(xprt_register_transport);
125 
126 /**
127  * xprt_unregister_transport - unregister a transport implementation
128  * @transport: transport to unregister
129  *
130  * Returns:
131  * 0:		transport successfully unregistered
132  * -ENOENT:	transport never registered
133  */
134 int xprt_unregister_transport(struct xprt_class *transport)
135 {
136 	struct xprt_class *t;
137 	int result;
138 
139 	result = 0;
140 	spin_lock(&xprt_list_lock);
141 	list_for_each_entry(t, &xprt_list, list) {
142 		if (t == transport) {
143 			printk(KERN_INFO
144 				"RPC: Unregistered %s transport module.\n",
145 				transport->name);
146 			list_del_init(&transport->list);
147 			goto out;
148 		}
149 	}
150 	result = -ENOENT;
151 
152 out:
153 	spin_unlock(&xprt_list_lock);
154 	return result;
155 }
156 EXPORT_SYMBOL_GPL(xprt_unregister_transport);
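
/*
 * Editor's note: a hypothetical transport module showing how the two
 * calls above are normally paired.  The xprt_class instance, its ident
 * value and the example_xprt_setup() callback are invented for
 * illustration; the registration pattern itself is the one documented
 * above (compare the transport classes in xprtsock.c).
 */
#if 0	/* illustrative sketch only, not part of this file */
static struct xprt_class example_transport = {
	.list	= LIST_HEAD_INIT(example_transport.list),
	.name	= "example",
	.owner	= THIS_MODULE,
	.ident	= 99,			/* made-up transport identifier */
	.setup	= example_xprt_setup,	/* hypothetical setup routine */
};

static int __init example_transport_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_transport_exit(void)
{
	xprt_unregister_transport(&example_transport);
}

module_init(example_transport_init);
module_exit(example_transport_exit);
#endif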
157 
158 /**
159  * xprt_load_transport - load a transport implementation
160  * @transport_name: transport to load
161  *
162  * Returns:
163  * 0:		transport successfully loaded
164  * -ENOENT:	transport module not available
165  */
166 int xprt_load_transport(const char *transport_name)
167 {
168 	struct xprt_class *t;
169 	char module_name[sizeof t->name + 5];
170 	int result;
171 
172 	result = 0;
173 	spin_lock(&xprt_list_lock);
174 	list_for_each_entry(t, &xprt_list, list) {
175 		if (strcmp(t->name, transport_name) == 0) {
176 			spin_unlock(&xprt_list_lock);
177 			goto out;
178 		}
179 	}
180 	spin_unlock(&xprt_list_lock);
181 	strcpy(module_name, "xprt");
182 	strncat(module_name, transport_name, sizeof t->name);
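	/* e.g. a transport_name of "rdma" requests the "xprtrdma" module */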
183 	result = request_module(module_name);
184 out:
185 	return result;
186 }
187 EXPORT_SYMBOL_GPL(xprt_load_transport);
188 
189 /**
190  * xprt_reserve_xprt - serialize write access to transports
191  * @task: task that is requesting access to the transport
192  *
193  * This prevents mixing the payload of separate requests, and prevents
194  * transport connects from colliding with writes.  No congestion control
195  * is provided.
196  */
197 int xprt_reserve_xprt(struct rpc_task *task)
198 {
199 	struct rpc_rqst *req = task->tk_rqstp;
200 	struct rpc_xprt	*xprt = req->rq_xprt;
201 
202 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
203 		if (task == xprt->snd_task)
204 			return 1;
205 		if (task == NULL)
206 			return 0;
207 		goto out_sleep;
208 	}
209 	xprt->snd_task = task;
210 	if (req) {
211 		req->rq_bytes_sent = 0;
212 		req->rq_ntrans++;
213 	}
214 	return 1;
215 
216 out_sleep:
217 	dprintk("RPC: %5u failed to lock transport %p\n",
218 			task->tk_pid, xprt);
219 	task->tk_timeout = 0;
220 	task->tk_status = -EAGAIN;
221 	if (req && req->rq_ntrans)
222 		rpc_sleep_on(&xprt->resend, task, NULL);
223 	else
224 		rpc_sleep_on(&xprt->sending, task, NULL);
225 	return 0;
226 }
227 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
228 
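/*
 * Drop the transport write lock.  If a close was requested while the
 * lock was held (and the transport is not shutting down), XPRT_LOCKED
 * is left set and the lock is handed straight to the autoclose worker,
 * so the close cannot race with a new sender taking the lock.
 */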
229 static void xprt_clear_locked(struct rpc_xprt *xprt)
230 {
231 	xprt->snd_task = NULL;
232 	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
233 		smp_mb__before_clear_bit();
234 		clear_bit(XPRT_LOCKED, &xprt->state);
235 		smp_mb__after_clear_bit();
236 	} else
237 		queue_work(rpciod_workqueue, &xprt->task_cleanup);
238 }
239 
240 /**
241  * xprt_reserve_xprt_cong - serialize write access to transports
242  * @task: task that is requesting access to the transport
243  *
244  * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
245  * integrated into the decision of whether a request is allowed to be
246  * woken up and given access to the transport.
247  */
248 int xprt_reserve_xprt_cong(struct rpc_task *task)
249 {
250 	struct rpc_xprt	*xprt = task->tk_xprt;
251 	struct rpc_rqst *req = task->tk_rqstp;
252 
253 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
254 		if (task == xprt->snd_task)
255 			return 1;
256 		goto out_sleep;
257 	}
258 	if (__xprt_get_cong(xprt, task)) {
259 		xprt->snd_task = task;
260 		if (req) {
261 			req->rq_bytes_sent = 0;
262 			req->rq_ntrans++;
263 		}
264 		return 1;
265 	}
266 	xprt_clear_locked(xprt);
267 out_sleep:
268 	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
269 	task->tk_timeout = 0;
270 	task->tk_status = -EAGAIN;
271 	if (req && req->rq_ntrans)
272 		rpc_sleep_on(&xprt->resend, task, NULL);
273 	else
274 		rpc_sleep_on(&xprt->sending, task, NULL);
275 	return 0;
276 }
277 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
278 
279 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
280 {
281 	int retval;
282 
283 	spin_lock_bh(&xprt->transport_lock);
284 	retval = xprt->ops->reserve_xprt(task);
285 	spin_unlock_bh(&xprt->transport_lock);
286 	return retval;
287 }
288 
289 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
290 {
291 	struct rpc_task *task;
292 	struct rpc_rqst *req;
293 
294 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
295 		return;
296 
297 	task = rpc_wake_up_next(&xprt->resend);
298 	if (!task) {
299 		task = rpc_wake_up_next(&xprt->sending);
300 		if (!task)
301 			goto out_unlock;
302 	}
303 
304 	req = task->tk_rqstp;
305 	xprt->snd_task = task;
306 	if (req) {
307 		req->rq_bytes_sent = 0;
308 		req->rq_ntrans++;
309 	}
310 	return;
311 
312 out_unlock:
313 	xprt_clear_locked(xprt);
314 }
315 
316 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
317 {
318 	struct rpc_task *task;
319 
320 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
321 		return;
322 	if (RPCXPRT_CONGESTED(xprt))
323 		goto out_unlock;
324 	task = rpc_wake_up_next(&xprt->resend);
325 	if (!task) {
326 		task = rpc_wake_up_next(&xprt->sending);
327 		if (!task)
328 			goto out_unlock;
329 	}
330 	if (__xprt_get_cong(xprt, task)) {
331 		struct rpc_rqst *req = task->tk_rqstp;
332 		xprt->snd_task = task;
333 		if (req) {
334 			req->rq_bytes_sent = 0;
335 			req->rq_ntrans++;
336 		}
337 		return;
338 	}
339 out_unlock:
340 	xprt_clear_locked(xprt);
341 }
342 
343 /**
344  * xprt_release_xprt - allow other requests to use a transport
345  * @xprt: transport with other tasks potentially waiting
346  * @task: task that is releasing access to the transport
347  *
348  * Note that "task" can be NULL.  No congestion control is provided.
349  */
350 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
351 {
352 	if (xprt->snd_task == task) {
353 		xprt_clear_locked(xprt);
354 		__xprt_lock_write_next(xprt);
355 	}
356 }
357 EXPORT_SYMBOL_GPL(xprt_release_xprt);
358 
359 /**
360  * xprt_release_xprt_cong - allow other requests to use a transport
361  * @xprt: transport with other tasks potentially waiting
362  * @task: task that is releasing access to the transport
363  *
364  * Note that "task" can be NULL.  Another task is awoken to use the
365  * transport if the transport's congestion window allows it.
366  */
367 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
368 {
369 	if (xprt->snd_task == task) {
370 		xprt_clear_locked(xprt);
371 		__xprt_lock_write_next_cong(xprt);
372 	}
373 }
374 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
375 
376 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
377 {
378 	spin_lock_bh(&xprt->transport_lock);
379 	xprt->ops->release_xprt(xprt, task);
380 	spin_unlock_bh(&xprt->transport_lock);
381 }
382 
383 /*
384  * Van Jacobson congestion avoidance. Check if the congestion window
385  * overflowed. Put the task to sleep if this is the case.
386  */
387 static int
388 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
389 {
390 	struct rpc_rqst *req = task->tk_rqstp;
391 
392 	if (req->rq_cong)
393 		return 1;
394 	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
395 			task->tk_pid, xprt->cong, xprt->cwnd);
396 	if (RPCXPRT_CONGESTED(xprt))
397 		return 0;
398 	req->rq_cong = 1;
399 	xprt->cong += RPC_CWNDSCALE;
400 	return 1;
401 }
402 
403 /*
404  * Adjust the congestion window, and wake up the next task
405  * that has been sleeping due to congestion
406  */
407 static void
408 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
409 {
410 	if (!req->rq_cong)
411 		return;
412 	req->rq_cong = 0;
413 	xprt->cong -= RPC_CWNDSCALE;
414 	__xprt_lock_write_next_cong(xprt);
415 }
416 
417 /**
418  * xprt_release_rqst_cong - housekeeping when request is complete
419  * @task: RPC request that recently completed
420  *
421  * Useful for transports that require congestion control.
422  */
423 void xprt_release_rqst_cong(struct rpc_task *task)
424 {
425 	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
426 }
427 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
428 
429 /**
430  * xprt_adjust_cwnd - adjust transport congestion window
431  * @task: recently completed RPC request used to adjust window
432  * @result: result code of completed RPC request
433  *
434  * We use a time-smoothed congestion estimator to avoid heavy oscillation.
435  */
436 void xprt_adjust_cwnd(struct rpc_task *task, int result)
437 {
438 	struct rpc_rqst *req = task->tk_rqstp;
439 	struct rpc_xprt *xprt = task->tk_xprt;
440 	unsigned long cwnd = xprt->cwnd;
441 
442 	if (result >= 0 && cwnd <= xprt->cong) {
443 		/* The (cwnd >> 1) term makes sure
444 		 * the result gets rounded properly. */
445 		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
446 		if (cwnd > RPC_MAXCWND(xprt))
447 			cwnd = RPC_MAXCWND(xprt);
448 		__xprt_lock_write_next_cong(xprt);
449 	} else if (result == -ETIMEDOUT) {
450 		cwnd >>= 1;
451 		if (cwnd < RPC_CWNDSCALE)
452 			cwnd = RPC_CWNDSCALE;
453 	}
454 	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
455 			xprt->cong, xprt->cwnd, cwnd);
456 	xprt->cwnd = cwnd;
457 	__xprt_put_cong(xprt, req);
458 }
459 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
460 
461 /**
462  * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
463  * @xprt: transport with waiting tasks
464  * @status: result code to plant in each task before waking it
465  *
466  */
467 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
468 {
469 	if (status < 0)
470 		rpc_wake_up_status(&xprt->pending, status);
471 	else
472 		rpc_wake_up(&xprt->pending);
473 }
474 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
475 
476 /**
477  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
478  * @task: task to be put to sleep
479  * @action: function pointer to be executed after wait
480  */
481 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
482 {
483 	struct rpc_rqst *req = task->tk_rqstp;
484 	struct rpc_xprt *xprt = req->rq_xprt;
485 
486 	task->tk_timeout = req->rq_timeout;
487 	rpc_sleep_on(&xprt->pending, task, action);
488 }
489 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
490 
491 /**
492  * xprt_write_space - wake the task waiting for transport output buffer space
493  * @xprt: transport with waiting tasks
494  *
495  * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
496  */
497 void xprt_write_space(struct rpc_xprt *xprt)
498 {
499 	if (unlikely(xprt->shutdown))
500 		return;
501 
502 	spin_lock_bh(&xprt->transport_lock);
503 	if (xprt->snd_task) {
504 		dprintk("RPC:       write space: waking waiting task on "
505 				"xprt %p\n", xprt);
506 		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
507 	}
508 	spin_unlock_bh(&xprt->transport_lock);
509 }
510 EXPORT_SYMBOL_GPL(xprt_write_space);
511 
512 /**
513  * xprt_set_retrans_timeout_def - set a request's retransmit timeout
514  * @task: task whose timeout is to be set
515  *
516  * Set a request's retransmit timeout based on the transport's
517  * default timeout parameters.  Used by transports that don't adjust
518  * the retransmit timeout based on round-trip time estimation.
519  */
520 void xprt_set_retrans_timeout_def(struct rpc_task *task)
521 {
522 	task->tk_timeout = task->tk_rqstp->rq_timeout;
523 }
524 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
525 
526 /**
527  * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
528  * @task: task whose timeout is to be set
529  *
530  * Set a request's retransmit timeout using the RTT estimator.
531  */
532 void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
533 {
534 	int timer = task->tk_msg.rpc_proc->p_timer;
535 	struct rpc_clnt *clnt = task->tk_client;
536 	struct rpc_rtt *rtt = clnt->cl_rtt;
537 	struct rpc_rqst *req = task->tk_rqstp;
538 	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
539 
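	/*
	 * Start from the estimator's smoothed RTO for this procedure
	 * class, then back off exponentially: once for every timeout
	 * already recorded against this timer and once for every
	 * retransmission of this particular request, capped at the
	 * client's configured maximum.
	 */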
540 	task->tk_timeout = rpc_calc_rto(rtt, timer);
541 	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
542 	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
543 		task->tk_timeout = max_timeout;
544 }
545 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
546 
547 static void xprt_reset_majortimeo(struct rpc_rqst *req)
548 {
549 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
550 
551 	req->rq_majortimeo = req->rq_timeout;
552 	if (to->to_exponential)
553 		req->rq_majortimeo <<= to->to_retries;
554 	else
555 		req->rq_majortimeo += to->to_increment * to->to_retries;
556 	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
557 		req->rq_majortimeo = to->to_maxval;
558 	req->rq_majortimeo += jiffies;
559 }
560 
561 /**
562  * xprt_adjust_timeout - adjust timeout values for next retransmit
563  * @req: RPC request containing parameters to use for the adjustment
564  *
565  */
566 int xprt_adjust_timeout(struct rpc_rqst *req)
567 {
568 	struct rpc_xprt *xprt = req->rq_xprt;
569 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
570 	int status = 0;
571 
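	/*
	 * Before rq_majortimeo expires this is a "minor" timeout: bump the
	 * per-request timeout and let the caller retransmit silently.  Once
	 * the major timeout has passed, start again from to_initval, reset
	 * the RTT estimate, and return -ETIMEDOUT so the caller can decide
	 * whether to keep retrying.
	 */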
572 	if (time_before(jiffies, req->rq_majortimeo)) {
573 		if (to->to_exponential)
574 			req->rq_timeout <<= 1;
575 		else
576 			req->rq_timeout += to->to_increment;
577 		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
578 			req->rq_timeout = to->to_maxval;
579 		req->rq_retries++;
580 	} else {
581 		req->rq_timeout = to->to_initval;
582 		req->rq_retries = 0;
583 		xprt_reset_majortimeo(req);
584 		/* Reset the RTT counters == "slow start" */
585 		spin_lock_bh(&xprt->transport_lock);
586 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
587 		spin_unlock_bh(&xprt->transport_lock);
588 		status = -ETIMEDOUT;
589 	}
590 
591 	if (req->rq_timeout == 0) {
592 		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
593 		req->rq_timeout = 5 * HZ;
594 	}
595 	return status;
596 }
597 
598 static void xprt_autoclose(struct work_struct *work)
599 {
600 	struct rpc_xprt *xprt =
601 		container_of(work, struct rpc_xprt, task_cleanup);
602 
603 	xprt->ops->close(xprt);
604 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
605 	xprt_release_write(xprt, NULL);
606 }
607 
608 /**
609  * xprt_disconnect_done - mark a transport as disconnected
610  * @xprt: transport to flag for disconnect
611  *
612  */
613 void xprt_disconnect_done(struct rpc_xprt *xprt)
614 {
615 	dprintk("RPC:       disconnected transport %p\n", xprt);
616 	spin_lock_bh(&xprt->transport_lock);
617 	xprt_clear_connected(xprt);
618 	xprt_wake_pending_tasks(xprt, -EAGAIN);
619 	spin_unlock_bh(&xprt->transport_lock);
620 }
621 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
622 
623 /**
624  * xprt_force_disconnect - force a transport to disconnect
625  * @xprt: transport to disconnect
626  *
627  */
628 void xprt_force_disconnect(struct rpc_xprt *xprt)
629 {
630 	/* Don't race with the test_bit() in xprt_clear_locked() */
631 	spin_lock_bh(&xprt->transport_lock);
632 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
633 	/* Try to schedule an autoclose RPC call */
634 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
635 		queue_work(rpciod_workqueue, &xprt->task_cleanup);
636 	xprt_wake_pending_tasks(xprt, -EAGAIN);
637 	spin_unlock_bh(&xprt->transport_lock);
638 }
639 
640 /**
641  * xprt_conditional_disconnect - force a transport to disconnect
642  * @xprt: transport to disconnect
643  * @cookie: 'connection cookie'
644  *
645  * This attempts to break the connection if and only if 'cookie' matches
646  * the current transport 'connection cookie'. It ensures that we don't
647  * try to break the connection more than once when we need to retransmit
648  * a batch of RPC requests.
649  *
650  */
651 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
652 {
653 	/* Don't race with the test_bit() in xprt_clear_locked() */
654 	spin_lock_bh(&xprt->transport_lock);
655 	if (cookie != xprt->connect_cookie)
656 		goto out;
657 	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
658 		goto out;
659 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
660 	/* Try to schedule an autoclose RPC call */
661 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
662 		queue_work(rpciod_workqueue, &xprt->task_cleanup);
663 	xprt_wake_pending_tasks(xprt, -EAGAIN);
664 out:
665 	spin_unlock_bh(&xprt->transport_lock);
666 }
667 
668 static void
669 xprt_init_autodisconnect(unsigned long data)
670 {
671 	struct rpc_xprt *xprt = (struct rpc_xprt *)data;
672 
673 	spin_lock(&xprt->transport_lock);
674 	if (!list_empty(&xprt->recv) || xprt->shutdown)
675 		goto out_abort;
676 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
677 		goto out_abort;
678 	spin_unlock(&xprt->transport_lock);
679 	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
680 	queue_work(rpciod_workqueue, &xprt->task_cleanup);
681 	return;
682 out_abort:
683 	spin_unlock(&xprt->transport_lock);
684 }
685 
686 /**
687  * xprt_connect - schedule a transport connect operation
688  * @task: RPC task that is requesting the connect
689  *
690  */
691 void xprt_connect(struct rpc_task *task)
692 {
693 	struct rpc_xprt	*xprt = task->tk_xprt;
694 
695 	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
696 			xprt, (xprt_connected(xprt) ? "is" : "is not"));
697 
698 	if (!xprt_bound(xprt)) {
699 		task->tk_status = -EAGAIN;
700 		return;
701 	}
702 	if (!xprt_lock_write(xprt, task))
703 		return;
704 
705 	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
706 		xprt->ops->close(xprt);
707 
708 	if (xprt_connected(xprt))
709 		xprt_release_write(xprt, task);
710 	else {
711 		if (task->tk_rqstp)
712 			task->tk_rqstp->rq_bytes_sent = 0;
713 
714 		task->tk_timeout = task->tk_rqstp->rq_timeout;
715 		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
716 
717 		if (test_bit(XPRT_CLOSING, &xprt->state))
718 			return;
719 		if (xprt_test_and_set_connecting(xprt))
720 			return;
721 		xprt->stat.connect_start = jiffies;
722 		xprt->ops->connect(task);
723 	}
724 }
725 
726 static void xprt_connect_status(struct rpc_task *task)
727 {
728 	struct rpc_xprt	*xprt = task->tk_xprt;
729 
730 	if (task->tk_status == 0) {
731 		xprt->stat.connect_count++;
732 		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
733 		dprintk("RPC: %5u xprt_connect_status: connection established\n",
734 				task->tk_pid);
735 		return;
736 	}
737 
738 	switch (task->tk_status) {
739 	case -EAGAIN:
740 		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
741 		break;
742 	case -ETIMEDOUT:
743 		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
744 				"out\n", task->tk_pid);
745 		break;
746 	default:
747 		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
748 				"server %s\n", task->tk_pid, -task->tk_status,
749 				task->tk_client->cl_server);
750 		xprt_release_write(xprt, task);
751 		task->tk_status = -EIO;
752 	}
753 }
754 
755 /**
756  * xprt_lookup_rqst - find an RPC request corresponding to an XID
757  * @xprt: transport on which the original request was transmitted
758  * @xid: RPC XID of incoming reply
759  *
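 * The caller is expected to hold the transport lock while the receive
 * list is searched.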
760  */
761 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
762 {
763 	struct list_head *pos;
764 
765 	list_for_each(pos, &xprt->recv) {
766 		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
767 		if (entry->rq_xid == xid)
768 			return entry;
769 	}
770 
771 	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
772 			ntohl(xid));
773 	xprt->stat.bad_xids++;
774 	return NULL;
775 }
776 EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
777 
778 static void xprt_update_rtt(struct rpc_task *task)
779 {
780 	struct rpc_rqst *req = task->tk_rqstp;
781 	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
782 	unsigned timer = task->tk_msg.rpc_proc->p_timer;
783 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
784 
785 	if (timer) {
786 		if (req->rq_ntrans == 1)
787 			rpc_update_rtt(rtt, timer, m);
788 		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
789 	}
790 }
791 
792 /**
793  * xprt_complete_rqst - called when reply processing is complete
794  * @task: RPC request that recently completed
795  * @copied: actual number of bytes received from the transport
796  *
797  * Caller holds transport lock.
798  */
799 void xprt_complete_rqst(struct rpc_task *task, int copied)
800 {
801 	struct rpc_rqst *req = task->tk_rqstp;
802 	struct rpc_xprt *xprt = req->rq_xprt;
803 
804 	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
805 			task->tk_pid, ntohl(req->rq_xid), copied);
806 
807 	xprt->stat.recvs++;
808 	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
809 	if (xprt->ops->timer != NULL)
810 		xprt_update_rtt(task);
811 
812 	list_del_init(&req->rq_list);
813 	req->rq_private_buf.len = copied;
814 	/* Ensure all writes are done before we update
815 	 * req->rq_reply_bytes_recvd */
816 	smp_wmb();
817 	req->rq_reply_bytes_recvd = copied;
818 	rpc_wake_up_queued_task(&xprt->pending, task);
819 }
820 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
821 
822 static void xprt_timer(struct rpc_task *task)
823 {
824 	struct rpc_rqst *req = task->tk_rqstp;
825 	struct rpc_xprt *xprt = req->rq_xprt;
826 
827 	if (task->tk_status != -ETIMEDOUT)
828 		return;
829 	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
830 
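	/*
	 * If the reply raced in after the timeout fired, treat the request
	 * as complete rather than timed out; otherwise give the transport's
	 * timer hook (e.g. congestion window adjustment for UDP) a chance
	 * to react before the caller sees -ETIMEDOUT.
	 */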
831 	spin_lock_bh(&xprt->transport_lock);
832 	if (!req->rq_reply_bytes_recvd) {
833 		if (xprt->ops->timer)
834 			xprt->ops->timer(task);
835 	} else
836 		task->tk_status = 0;
837 	spin_unlock_bh(&xprt->transport_lock);
838 }
839 
840 static inline int xprt_has_timer(struct rpc_xprt *xprt)
841 {
842 	return xprt->idle_timeout != 0;
843 }
844 
845 /**
846  * xprt_prepare_transmit - reserve the transport before sending a request
847  * @task: RPC task about to send a request
848  *
849  */
850 int xprt_prepare_transmit(struct rpc_task *task)
851 {
852 	struct rpc_rqst	*req = task->tk_rqstp;
853 	struct rpc_xprt	*xprt = req->rq_xprt;
854 	int err = 0;
855 
856 	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
857 
858 	spin_lock_bh(&xprt->transport_lock);
859 	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
860 		err = req->rq_reply_bytes_recvd;
861 		goto out_unlock;
862 	}
863 	if (!xprt->ops->reserve_xprt(task))
864 		err = -EAGAIN;
865 out_unlock:
866 	spin_unlock_bh(&xprt->transport_lock);
867 	return err;
868 }
869 
870 void xprt_end_transmit(struct rpc_task *task)
871 {
872 	xprt_release_write(task->tk_rqstp->rq_xprt, task);
873 }
874 
875 /**
876  * xprt_transmit - send an RPC request on a transport
877  * @task: controlling RPC task
878  *
879  * We have to copy the iovec because sendmsg fiddles with its contents.
880  */
881 void xprt_transmit(struct rpc_task *task)
882 {
883 	struct rpc_rqst	*req = task->tk_rqstp;
884 	struct rpc_xprt	*xprt = req->rq_xprt;
885 	int status;
886 
887 	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
888 
889 	if (!req->rq_reply_bytes_recvd) {
890 		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
891 			/*
892 			 * Add to the list only if we're expecting a reply
893 			 */
894 			spin_lock_bh(&xprt->transport_lock);
895 			/* Update the softirq receive buffer */
896 			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
897 					sizeof(req->rq_private_buf));
898 			/* Add request to the receive list */
899 			list_add_tail(&req->rq_list, &xprt->recv);
900 			spin_unlock_bh(&xprt->transport_lock);
901 			xprt_reset_majortimeo(req);
902 			/* Turn off autodisconnect */
903 			del_singleshot_timer_sync(&xprt->timer);
904 		}
905 	} else if (!req->rq_bytes_sent)
906 		return;
907 
908 	req->rq_connect_cookie = xprt->connect_cookie;
909 	req->rq_xtime = ktime_get();
910 	status = xprt->ops->send_request(task);
911 	if (status != 0) {
912 		task->tk_status = status;
913 		return;
914 	}
915 
916 	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
917 	spin_lock_bh(&xprt->transport_lock);
918 
919 	xprt->ops->set_retrans_timeout(task);
920 
921 	xprt->stat.sends++;
922 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
923 	xprt->stat.bklog_u += xprt->backlog.qlen;
924 
925 	/* Don't race with disconnect */
926 	if (!xprt_connected(xprt))
927 		task->tk_status = -ENOTCONN;
928 	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
929 		/*
930 		 * Sleep on the pending queue since
931 		 * we're expecting a reply.
932 		 */
933 		rpc_sleep_on(&xprt->pending, task, xprt_timer);
934 	}
935 	spin_unlock_bh(&xprt->transport_lock);
936 }
937 
938 static void xprt_alloc_slot(struct rpc_task *task)
939 {
940 	struct rpc_xprt	*xprt = task->tk_xprt;
941 
942 	task->tk_status = 0;
943 	if (task->tk_rqstp)
944 		return;
945 	if (!list_empty(&xprt->free)) {
946 		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
947 		list_del_init(&req->rq_list);
948 		task->tk_rqstp = req;
949 		xprt_request_init(task, xprt);
950 		return;
951 	}
952 	dprintk("RPC:       waiting for request slot\n");
953 	task->tk_status = -EAGAIN;
954 	task->tk_timeout = 0;
955 	rpc_sleep_on(&xprt->backlog, task, NULL);
956 }
957 
958 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
959 {
960 	memset(req, 0, sizeof(*req));	/* mark unused */
961 
962 	spin_lock(&xprt->reserve_lock);
963 	list_add(&req->rq_list, &xprt->free);
964 	rpc_wake_up_next(&xprt->backlog);
965 	spin_unlock(&xprt->reserve_lock);
966 }
967 
968 /**
969  * xprt_reserve - allocate an RPC request slot
970  * @task: RPC task requesting a slot allocation
971  *
972  * If no more slots are available, place the task on the transport's
973  * backlog queue.
974  */
975 void xprt_reserve(struct rpc_task *task)
976 {
977 	struct rpc_xprt	*xprt = task->tk_xprt;
978 
979 	task->tk_status = -EIO;
980 	spin_lock(&xprt->reserve_lock);
981 	xprt_alloc_slot(task);
982 	spin_unlock(&xprt->reserve_lock);
983 }
984 
985 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
986 {
987 	return xprt->xid++;
988 }
989 
990 static inline void xprt_init_xid(struct rpc_xprt *xprt)
991 {
992 	xprt->xid = net_random();
993 }
994 
995 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
996 {
997 	struct rpc_rqst	*req = task->tk_rqstp;
998 
999 	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
1000 	req->rq_task	= task;
1001 	req->rq_xprt    = xprt;
1002 	req->rq_buffer  = NULL;
1003 	req->rq_xid     = xprt_alloc_xid(xprt);
1004 	req->rq_release_snd_buf = NULL;
1005 	xprt_reset_majortimeo(req);
1006 	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
1007 			req, ntohl(req->rq_xid));
1008 }
1009 
1010 /**
1011  * xprt_release - release an RPC request slot
1012  * @task: task which is finished with the slot
1013  *
1014  */
1015 void xprt_release(struct rpc_task *task)
1016 {
1017 	struct rpc_xprt	*xprt;
1018 	struct rpc_rqst	*req;
1019 
1020 	if (!(req = task->tk_rqstp))
1021 		return;
1022 
1023 	xprt = req->rq_xprt;
1024 	rpc_count_iostats(task);
1025 	spin_lock_bh(&xprt->transport_lock);
1026 	xprt->ops->release_xprt(xprt, task);
1027 	if (xprt->ops->release_request)
1028 		xprt->ops->release_request(task);
1029 	if (!list_empty(&req->rq_list))
1030 		list_del(&req->rq_list);
1031 	xprt->last_used = jiffies;
1032 	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
1033 		mod_timer(&xprt->timer,
1034 				xprt->last_used + xprt->idle_timeout);
1035 	spin_unlock_bh(&xprt->transport_lock);
1036 	if (req->rq_buffer)
1037 		xprt->ops->buf_free(req->rq_buffer);
1038 	task->tk_rqstp = NULL;
1039 	if (req->rq_release_snd_buf)
1040 		req->rq_release_snd_buf(req);
1041 
1042 	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1043 	if (likely(!bc_prealloc(req)))
1044 		xprt_free_slot(xprt, req);
1045 	else
1046 		xprt_free_bc_request(req);
1047 }
1048 
1049 /**
1050  * xprt_create_transport - create an RPC transport
1051  * @args: rpc transport creation arguments
1052  *
1053  */
1054 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1055 {
1056 	struct rpc_xprt	*xprt;
1057 	struct rpc_rqst	*req;
1058 	struct xprt_class *t;
1059 
1060 	spin_lock(&xprt_list_lock);
1061 	list_for_each_entry(t, &xprt_list, list) {
1062 		if (t->ident == args->ident) {
1063 			spin_unlock(&xprt_list_lock);
1064 			goto found;
1065 		}
1066 	}
1067 	spin_unlock(&xprt_list_lock);
1068 	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
1069 	return ERR_PTR(-EIO);
1070 
1071 found:
1072 	xprt = t->setup(args);
1073 	if (IS_ERR(xprt)) {
1074 		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
1075 				-PTR_ERR(xprt));
1076 		return xprt;
1077 	}
1078 
1079 	kref_init(&xprt->kref);
1080 	spin_lock_init(&xprt->transport_lock);
1081 	spin_lock_init(&xprt->reserve_lock);
1082 
1083 	INIT_LIST_HEAD(&xprt->free);
1084 	INIT_LIST_HEAD(&xprt->recv);
1085 #if defined(CONFIG_NFS_V4_1)
1086 	spin_lock_init(&xprt->bc_pa_lock);
1087 	INIT_LIST_HEAD(&xprt->bc_pa_list);
1088 #endif /* CONFIG_NFS_V4_1 */
1089 
1090 	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1091 	if (xprt_has_timer(xprt))
1092 		setup_timer(&xprt->timer, xprt_init_autodisconnect,
1093 			    (unsigned long)xprt);
1094 	else
1095 		init_timer(&xprt->timer);
1096 	xprt->last_used = jiffies;
1097 	xprt->cwnd = RPC_INITCWND;
1098 	xprt->bind_index = 0;
1099 
1100 	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1101 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1102 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1103 	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
1104 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1105 
1106 	/* initialize free list */
1107 	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
1108 		list_add(&req->rq_list, &xprt->free);
1109 
1110 	xprt_init_xid(xprt);
1111 
1112 	dprintk("RPC:       created transport %p with %u slots\n", xprt,
1113 			xprt->max_reqs);
1114 	return xprt;
1115 }
1116 
1117 /**
1118  * xprt_destroy - destroy an RPC transport, killing off all requests.
1119  * @kref: kref for the transport to destroy
1120  *
1121  */
1122 static void xprt_destroy(struct kref *kref)
1123 {
1124 	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
1125 
1126 	dprintk("RPC:       destroying transport %p\n", xprt);
1127 	xprt->shutdown = 1;
1128 	del_timer_sync(&xprt->timer);
1129 
1130 	rpc_destroy_wait_queue(&xprt->binding);
1131 	rpc_destroy_wait_queue(&xprt->pending);
1132 	rpc_destroy_wait_queue(&xprt->sending);
1133 	rpc_destroy_wait_queue(&xprt->resend);
1134 	rpc_destroy_wait_queue(&xprt->backlog);
1135 	/*
1136 	 * Tear down transport state and free the rpc_xprt
1137 	 */
1138 	xprt->ops->destroy(xprt);
1139 }
1140 
1141 /**
1142  * xprt_put - release a reference to an RPC transport.
1143  * @xprt: pointer to the transport
1144  *
1145  */
1146 void xprt_put(struct rpc_xprt *xprt)
1147 {
1148 	kref_put(&xprt->kref, xprt_destroy);
1149 }
1150 
1151 /**
1152  * xprt_get - return a reference to an RPC transport.
1153  * @xprt: pointer to the transport
1154  *
1155  */
1156 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
1157 {
1158 	kref_get(&xprt->kref);
1159 	return xprt;
1160 }
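
/*
 * Editor's note: transport lifetimes follow the usual kref pattern.
 * xprt_create_transport() returns a transport holding one reference;
 * additional users take references with xprt_get() and drop them with
 * xprt_put(), and the final xprt_put() invokes xprt_destroy() above.
 */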
1161