/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
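
/*
 * A minimal sketch of that flow for a single request, as driven by the
 * rpc_task state machine in clnt.c (illustrative only: error handling,
 * congestion control and retransmission are elided):
 *
 *	xprt_reserve(task);			get a slot or sleep on the backlog
 *	... encode the message into req->rq_snd_buf (clnt.c) ...
 *	xprt_request_enqueue_receive(task);	register for reply matching
 *	xprt_request_enqueue_transmit(task);
 *	if (xprt_prepare_transmit(task)) {	take the transport write lock
 *		xprt_transmit(task);		drain the transmit queue
 *		xprt_end_transmit(task);	drop the write lock
 *	}
 *	xprt_request_wait_receive(task);	sleep until reply or timeout
 *	xprt_release(task);			return the slot
 */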

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
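
/*
 * A minimal sketch of how a transport module might use the three calls
 * above (illustrative only: "example_setup" and the "example" transport
 * are hypothetical, and a real module would define its own ident):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 *
 * A consumer wanting this transport loaded on demand would call
 * xprt_load_transport("example"), which resolves to the module alias
 * "xprtexample" via request_module().
 */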

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock_bh(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock_bh(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock_bh(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
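
/*
 * Worked example for xprt_adjust_cwnd() (numbers purely illustrative,
 * assuming RPC_CWNDSCALE is 256): with cwnd = 1024 (four slots) and a
 * full pipeline, each successful reply adds
 * (256 * 256 + 512) / 1024 = 64, so roughly four adjustments earn one
 * more slot; a single -ETIMEDOUT instead halves the window to 512, and
 * the window never drops below RPC_CWNDSCALE (one slot).
 */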

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Mark the transport as waiting for output buffer space. New attempts
 * to take the transport write lock are deferred until xprt_write_space()
 * clears the flag and wakes the first waiter on the sending queue.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock_bh(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
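
/*
 * Illustrative numbers for the RTT case above (assumed, not measured):
 * if rpc_calc_rto() yields 200ms worth of jiffies, the estimator has
 * already backed off twice for this timer class (rpc_ntimeo() == 2)
 * and this is the first retransmission (rq_retries == 1), the task
 * sleeps for 200ms << 3 = 1.6s, clamped to the client's to_maxval.
 */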

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
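
/*
 * Example of the backoff above, with illustrative (not default) values
 * of to_initval = 60s, to_increment = 60s, to_retries = 2 and linear
 * backoff: rq_timeout steps 60s -> 120s -> 180s while jiffies stays
 * below rq_majortimeo = 60s + 60s * 2 = 180s past the initial send;
 * once the major timeout passes, the cycle restarts from to_initval
 * and -ETIMEDOUT is reported to the caller.
 */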

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, NULL);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt receive lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt receive lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}
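
/*
 * A minimal sketch of the lookup/pin protocol these helpers support,
 * as a transport receive path might use it (illustrative only:
 * "copy_reply_data" stands in for transport-specific data copying):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *		copied = copy_reply_data(req);	 (may sleep safely here)
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */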

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	xprt_reset_majortimeo(req);
	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->set_retrans_timeout(task);
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		req->rq_ntrans--;
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
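
/*
 * Sketch of typical xprt_alloc() use from a transport setup routine
 * (sizes illustrative; compare xs_setup_xprt() in xprtsock.c):
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct sock_xprt),
 *			  xprt_tcp_slot_table_entries,
 *			  xprt_max_tcp_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */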

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

static void
xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	xprt_request_dequeue_all(task, req);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
	req->rq_bytes_sent = 0;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);