xref: /linux/include/linux/sunrpc/xprt.h (revision e9ef810dfee7a2227da9d423aecb0ced35faddbe)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  linux/include/linux/sunrpc/xprt.h
4  *
5  *  Declarations for the RPC transport interface.
6  *
7  *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
8  */
9 
10 #ifndef _LINUX_SUNRPC_XPRT_H
11 #define _LINUX_SUNRPC_XPRT_H
12 
13 #include <linux/uio.h>
14 #include <linux/socket.h>
15 #include <linux/in.h>
16 #include <linux/ktime.h>
17 #include <linux/kref.h>
18 #include <linux/sunrpc/sched.h>
19 #include <linux/sunrpc/xdr.h>
20 #include <linux/sunrpc/msg_prot.h>
21 
22 #define RPC_MIN_SLOT_TABLE	(2U)
23 #define RPC_DEF_SLOT_TABLE	(16U)
24 #define RPC_MAX_SLOT_TABLE_LIMIT	(65536U)
25 #define RPC_MAX_SLOT_TABLE	RPC_MAX_SLOT_TABLE_LIMIT
26 
27 #define RPC_CWNDSHIFT		(8U)
28 #define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
29 #define RPC_INITCWND		RPC_CWNDSCALE
30 #define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
31 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
32 
33 #define RPC_GSS_SEQNO_ARRAY_SIZE 3U
34 
35 enum rpc_display_format_t {
36 	RPC_DISPLAY_ADDR = 0,
37 	RPC_DISPLAY_PORT,
38 	RPC_DISPLAY_PROTO,
39 	RPC_DISPLAY_HEX_ADDR,
40 	RPC_DISPLAY_HEX_PORT,
41 	RPC_DISPLAY_NETID,
42 	RPC_DISPLAY_MAX,
43 };
44 
45 struct rpc_task;
46 struct rpc_xprt;
47 struct xprt_class;
48 struct seq_file;
49 struct svc_serv;
50 struct net;
51 #include <linux/lwq.h>
52 
/*
 * This describes a complete RPC request
 */
struct rpc_rqst {
	/*
	 * This is the user-visible part
	 */
	struct rpc_xprt *	rq_xprt;		/* RPC client */
	struct xdr_buf		rq_snd_buf;		/* send buffer */
	struct xdr_buf		rq_rcv_buf;		/* recv buffer */

	/*
	 * This is the private part
	 */
	struct rpc_task *	rq_task;	/* RPC task data */
	struct rpc_cred *	rq_cred;	/* Bound cred */
	__be32			rq_xid;		/* request XID */
	int			rq_cong;	/* has incremented xprt->cong */
	u32			rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE];	/* past gss req seq nos.,
						   newest first — see
						   xprt_rqst_add_seqno() */
	unsigned int		rq_seqno_count;	/* number of entries in rq_seqnos */
	int			rq_enc_pages_num;
	struct page		**rq_enc_pages;	/* scratch pages for use by
						   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */

	union {
		struct list_head	rq_list;	/* Slot allocation list */
		struct rb_node		rq_recv;	/* Receive queue */
	};

	struct list_head	rq_xmit;	/* Send queue */
	struct list_head	rq_xmit2;	/* Send queue */

	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
	size_t			rq_rcvsize;
	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
	size_t			rq_reply_bytes_recvd;	/* total reply bytes */
							/* received */

	struct xdr_buf		rq_private_buf;		/* The receive buffer
							 * used in the softirq.
							 */
	unsigned long		rq_majortimeo;	/* major timeout alarm */
	unsigned long		rq_minortimeo;	/* minor timeout alarm */
	unsigned long		rq_timeout;	/* Current timeout value */
	ktime_t			rq_rtt;		/* round-trip time */
	unsigned int		rq_retries;	/* # of retries */
	unsigned int		rq_connect_cookie;
						/* A cookie used to track the
						   state of the transport
						   connection */
	atomic_t		rq_pin;		/* pin count — see xprt_pin_rqst()
						 * and xprt_unpin_rqst() */

	/*
	 * Partial send handling
	 */
	u32			rq_bytes_sent;	/* Bytes we have sent */

	ktime_t			rq_xtime;	/* transmit time stamp */
	int			rq_ntrans;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct lwq_node		rq_bc_list;	/* Callback service list */
	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
/* Convenience accessors for the send buffer head and length */
#define rq_svec			rq_snd_buf.head
#define rq_slen			rq_snd_buf.len
124 
/*
 * Record @seqno as the most recent GSS sequence number used for @req.
 * The newest entry lives at rq_seqnos[0]; older entries are pushed one
 * slot back and the oldest is dropped once the array is full.
 *
 * Always returns 0.
 */
static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno)
{
	unsigned int i;

	if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE))
		req->rq_seqno_count++;

	/* Push existing entries back one slot to free up index 0 */
	for (i = RPC_GSS_SEQNO_ARRAY_SIZE - 1; i > 0; i--)
		req->rq_seqnos[i] = req->rq_seqnos[i - 1];
	req->rq_seqnos[0] = seqno;
	return 0;
}
136 
/* RPC transport layer security policies */
enum xprtsec_policies {
	RPC_XPRTSEC_NONE = 0,		/* no transport-layer security */
	RPC_XPRTSEC_TLS_ANON,		/* TLS without client identity */
	RPC_XPRTSEC_TLS_X509,		/* TLS with an x.509 client cert */
};
143 
/* Transport-layer security parameters for a transport */
struct xprtsec_parms {
	enum xprtsec_policies	policy;

	/* authentication material (keyring serial numbers) */
	key_serial_t		cert_serial;
	key_serial_t		privkey_serial;
};
151 
/*
 * Transport method table.  Each transport implementation supplies one
 * of these; the generic xprt code dispatches through it.  The bc_*
 * methods serve the backchannel (see bc_xprt below).
 */
struct rpc_xprt_ops {
	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*free_slot)(struct rpc_xprt *xprt,
				     struct rpc_rqst *req);
	void		(*rpcbind)(struct rpc_task *task);
	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
	int		(*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
				       size_t buflen);
	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
	int		(*buf_alloc)(struct rpc_task *task);
	void		(*buf_free)(struct rpc_task *task);
	int		(*prepare_request)(struct rpc_rqst *req,
					   struct xdr_buf *buf);
	int		(*send_request)(struct rpc_rqst *req);
	void		(*abort_send_request)(struct rpc_rqst *req);
	void		(*wait_for_reply_request)(struct rpc_task *task);
	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_request)(struct rpc_task *task);
	void		(*close)(struct rpc_xprt *xprt);
	void		(*destroy)(struct rpc_xprt *xprt);
	void		(*set_connect_timeout)(struct rpc_xprt *xprt,
					unsigned long connect_timeout,
					unsigned long reconnect_timeout);
	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
	int		(*enable_swap)(struct rpc_xprt *xprt);
	void		(*disable_swap)(struct rpc_xprt *xprt);
	void		(*inject_disconnect)(struct rpc_xprt *xprt);
	int		(*bc_setup)(struct rpc_xprt *xprt,
				    unsigned int min_reqs);
	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
	unsigned int	(*bc_num_slots)(struct rpc_xprt *xprt);
	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
	void		(*bc_destroy)(struct rpc_xprt *xprt,
				      unsigned int max_reqs);
};
191 
/*
 * RPC transport identifiers
 *
 * To preserve compatibility with the historical use of raw IP protocol
 * id's for transport selection, UDP and TCP identifiers are specified
 * with the previous values. No such restriction exists for new transports,
 * except that they may not collide with these values (17 and 6,
 * respectively).
 */
#define XPRT_TRANSPORT_BC       (1 << 31)	/* flag: backchannel variant */
enum xprt_transports {
	XPRT_TRANSPORT_UDP	= IPPROTO_UDP,
	XPRT_TRANSPORT_TCP	= IPPROTO_TCP,
	XPRT_TRANSPORT_BC_TCP	= IPPROTO_TCP | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_RDMA	= 256,
	XPRT_TRANSPORT_BC_RDMA	= XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_LOCAL	= 257,
	XPRT_TRANSPORT_TCP_TLS	= 258,
};
211 
212 struct rpc_sysfs_xprt;
/*
 * struct rpc_xprt - an instance of an RPC transport
 *
 * Reference counted via @kref (xprt_get()/xprt_put()); field layout is
 * shared across the generic code and the transport implementations.
 */
struct rpc_xprt {
	struct kref		kref;		/* Reference count */
	const struct rpc_xprt_ops *ops;		/* transport methods */
	unsigned int		id;		/* transport id */

	const struct rpc_timeout *timeout;	/* timeout parms */
	struct sockaddr_storage	addr;		/* server address */
	size_t			addrlen;	/* size of server address */
	int			prot;		/* IP protocol */

	unsigned long		cong;		/* current congestion */
	unsigned long		cwnd;		/* congestion window */

	size_t			max_payload;	/* largest RPC payload size,
						   in bytes */

	struct rpc_wait_queue	binding;	/* requests waiting on rpcbind */
	struct rpc_wait_queue	sending;	/* requests waiting to send */
	struct rpc_wait_queue	pending;	/* requests in flight */
	struct rpc_wait_queue	backlog;	/* waiting for slot */
	struct list_head	free;		/* free slots */
	unsigned int		max_reqs;	/* max number of slots */
	unsigned int		min_reqs;	/* min number of slots */
	unsigned int		num_reqs;	/* total slots */
	unsigned long		state;		/* transport state — XPRT_* bits */
	unsigned char		resvport   : 1,	/* use a reserved port */
				reuseport  : 1; /* reuse port on reconnect */
	atomic_t		swapper;	/* we're swapping over this
						   transport */
	unsigned int		bind_index;	/* bind function index */

	/*
	 * Multipath
	 */
	struct list_head	xprt_switch;

	/*
	 * Connection of transports
	 */
	unsigned long		bind_timeout,
				reestablish_timeout;
	struct xprtsec_parms	xprtsec;	/* transport-layer security parms */
	unsigned int		connect_cookie;	/* A cookie that gets bumped
						   every time the transport
						   is reconnected */

	/*
	 * Disconnection of idle transports
	 */
	struct work_struct	task_cleanup;
	struct timer_list	timer;
	unsigned long		last_used,
				idle_timeout,
				connect_timeout,
				max_reconnect_timeout;

	/*
	 * Send stuff
	 */
	atomic_long_t		queuelen;
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	spinlock_t		queue_lock;	/* send/receive queue lock */
	u32			xid;		/* Next XID value to use */
	struct rpc_task *	snd_task;	/* Task blocked in send */

	struct list_head	xmit_queue;	/* Send queue */
	atomic_long_t		xmit_queuelen;

	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct svc_serv		*bc_serv;       /* The RPC service which will */
						/* process the callback */
	unsigned int		bc_alloc_max;
	unsigned int		bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_slot_count;	/* Number of allocated slots */
	spinlock_t		bc_pa_lock;	/* Protects the preallocated
						 * items */
	struct list_head	bc_pa_list;	/* List of preallocated
						 * backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	struct rb_root		recv_queue;	/* Receive queue */

	/* Transport statistics */
	struct {
		unsigned long		bind_count,	/* total number of binds */
					connect_count,	/* total number of connects */
					connect_start,	/* connect start timestamp */
					connect_time,	/* jiffies waiting for connect */
					sends,		/* how many complete requests */
					recvs,		/* how many complete requests */
					bad_xids,	/* lookup_rqst didn't find XID */
					max_slots;	/* max rpc_slots used */

		unsigned long long	req_u,		/* average requests on the wire */
					bklog_u,	/* backlog queue utilization */
					sending_u,	/* send q utilization */
					pending_u;	/* pend q utilization */
	} stat;

	struct net		*xprt_net;
	netns_tracker		ns_tracker;
	const char		*servername;
	const char		*address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry		*debugfs;		/* debugfs directory */
#endif
	struct rcu_head		rcu;
	const struct xprt_class	*xprt_class;
	struct rpc_sysfs_xprt	*xprt_sysfs;
	bool			main; /* mark if this is the 1st transport */
};
325 
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Backchannel flags
 */
#define	RPC_BC_PA_IN_USE	0x0001		/* Preallocated backchannel */
						/* buffer in use */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* Return non-zero if @req comes from the backchannel prealloc pool. */
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
}
#else
/* Without backchannel support no request is ever preallocated. */
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
345 
/* Flags for struct xprt_create::flags */
#define XPRT_CREATE_INFINITE_SLOTS	(1U)
#define XPRT_CREATE_NO_IDLE_TIMEOUT	(1U << 1)

/* Arguments for xprt_create_transport() */
struct xprt_create {
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct net *		net;
	struct sockaddr *	srcaddr;	/* optional local address */
	struct sockaddr *	dstaddr;	/* remote peer address */
	size_t			addrlen;
	const char		*servername;
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
	struct rpc_xprt_switch	*bc_xps;
	unsigned int		flags;		/* XPRT_CREATE_* flags */
	struct xprtsec_parms	xprtsec;	/* transport-layer security parms */
	unsigned long		connect_timeout;
	unsigned long		reconnect_timeout;
};
363 
/*
 * A registered transport implementation; see
 * xprt_register_transport()/xprt_unregister_transport().
 */
struct xprt_class {
	struct list_head	list;
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct rpc_xprt *	(*setup)(struct xprt_create *);
	struct module		*owner;
	char			name[32];
	const char *		netid[];	/* netids handled by this class */
};
372 
373 /*
374  * Generic internal transport functions
375  */
376 struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
377 void			xprt_connect(struct rpc_task *task);
378 unsigned long		xprt_reconnect_delay(const struct rpc_xprt *xprt);
379 void			xprt_reconnect_backoff(struct rpc_xprt *xprt,
380 					       unsigned long init_to);
381 void			xprt_reserve(struct rpc_task *task);
382 void			xprt_retry_reserve(struct rpc_task *task);
383 int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
384 int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
385 void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
386 void			xprt_free_slot(struct rpc_xprt *xprt,
387 				       struct rpc_rqst *req);
388 bool			xprt_prepare_transmit(struct rpc_task *task);
389 void			xprt_request_enqueue_transmit(struct rpc_task *task);
390 int			xprt_request_enqueue_receive(struct rpc_task *task);
391 void			xprt_request_wait_receive(struct rpc_task *task);
392 void			xprt_request_dequeue_xprt(struct rpc_task *task);
393 bool			xprt_request_need_retransmit(struct rpc_task *task);
394 void			xprt_transmit(struct rpc_task *task);
395 void			xprt_end_transmit(struct rpc_task *task);
396 int			xprt_adjust_timeout(struct rpc_rqst *req);
397 void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
398 void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
399 void			xprt_release(struct rpc_task *task);
400 struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
401 void			xprt_put(struct rpc_xprt *xprt);
402 struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
403 				unsigned int num_prealloc,
404 				unsigned int max_req);
405 void			xprt_free(struct rpc_xprt *);
406 void			xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
407 bool			xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
408 void			xprt_cleanup_ids(void);
409 
/* Forward to the transport's ->enable_swap() method. */
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{
	return xprt->ops->enable_swap(xprt);
}
415 
/* Forward to the transport's ->disable_swap() method. */
static inline void
xprt_disable_swap(struct rpc_xprt *xprt)
{
	xprt->ops->disable_swap(xprt);
}
421 
422 /*
423  * Transport switch helper functions
424  */
425 int			xprt_register_transport(struct xprt_class *type);
426 int			xprt_unregister_transport(struct xprt_class *type);
427 int			xprt_find_transport_ident(const char *);
428 void			xprt_wait_for_reply_request_def(struct rpc_task *task);
429 void			xprt_wait_for_reply_request_rtt(struct rpc_task *task);
430 void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
431 void			xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
432 bool			xprt_write_space(struct rpc_xprt *xprt);
433 void			xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
434 struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
435 void			xprt_update_rtt(struct rpc_task *task);
436 void			xprt_complete_rqst(struct rpc_task *task, int copied);
437 void			xprt_pin_rqst(struct rpc_rqst *req);
438 void			xprt_unpin_rqst(struct rpc_rqst *req);
439 void			xprt_release_rqst_cong(struct rpc_task *task);
440 bool			xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
441 void			xprt_disconnect_done(struct rpc_xprt *xprt);
442 void			xprt_force_disconnect(struct rpc_xprt *xprt);
443 void			xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
444 
445 bool			xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
446 void			xprt_unlock_connect(struct rpc_xprt *, void *);
447 void			xprt_release_write(struct rpc_xprt *, struct rpc_task *);
448 
/*
 * Reserved bit positions in xprt->state
 *
 * Manipulated atomically via the xprt_set_*/xprt_clear_*/
 * xprt_test_and_* helpers below.
 */
#define XPRT_LOCKED		(0)
#define XPRT_CONNECTED		(1)
#define XPRT_CONNECTING		(2)
#define XPRT_CLOSE_WAIT		(3)
#define XPRT_BOUND		(4)
#define XPRT_BINDING		(5)
#define XPRT_CLOSING		(6)
#define XPRT_OFFLINE		(7)
#define XPRT_REMOVE		(8)
#define XPRT_CONGESTED		(9)
#define XPRT_CWND_WAIT		(10)
#define XPRT_WRITE_SPACE	(11)
#define XPRT_SND_IS_COOKIE	(12)
465 
/* Atomically set XPRT_CONNECTED in xprt->state. */
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTED, &xprt->state);
}
470 
/* Atomically clear XPRT_CONNECTED in xprt->state. */
static inline void xprt_clear_connected(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_CONNECTED, &xprt->state);
}
475 
/* Return non-zero if XPRT_CONNECTED is set in xprt->state. */
static inline int xprt_connected(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTED, &xprt->state);
}
480 
/* Atomically set XPRT_CONNECTED, returning its previous value. */
static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
}
485 
/* Atomically clear XPRT_CONNECTED, returning its previous value. */
static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
{
	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
}
490 
/*
 * Clear XPRT_CONNECTING.  The smp_mb__before_atomic()/
 * smp_mb__after_atomic() pair makes the clear_bit() fully ordered
 * with respect to surrounding memory accesses.
 */
static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CONNECTING, &xprt->state);
	smp_mb__after_atomic();
}
497 
/* Return non-zero if XPRT_CONNECTING is set in xprt->state. */
static inline int xprt_connecting(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTING, &xprt->state);
}
502 
/* Atomically set XPRT_CONNECTING, returning its previous value. */
static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}
507 
/*
 * Mark the transport as bound.  NOTE(review): the test_and_set_bit()
 * return value is ignored here — presumably the atomic RMW form is
 * used for its ordering; confirm before simplifying to set_bit().
 */
static inline void xprt_set_bound(struct rpc_xprt *xprt)
{
	test_and_set_bit(XPRT_BOUND, &xprt->state);
}
512 
/* Return non-zero if XPRT_BOUND is set in xprt->state. */
static inline int xprt_bound(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_BOUND, &xprt->state);
}
517 
/* Atomically clear XPRT_BOUND in xprt->state. */
static inline void xprt_clear_bound(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_BOUND, &xprt->state);
}
522 
/*
 * Clear XPRT_BINDING.  The smp_mb__before_atomic()/
 * smp_mb__after_atomic() pair makes the clear_bit() fully ordered
 * with respect to surrounding memory accesses.
 */
static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_BINDING, &xprt->state);
	smp_mb__after_atomic();
}
529 
/* Atomically set XPRT_BINDING, returning its previous value. */
static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
534 
535 void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
536 void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
537 void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
538 #endif /* _LINUX_SUNRPC_XPRT_H */
539