/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/include/linux/sunrpc/xprt.h
 *
 *  Declarations for the RPC transport interface.
 *
 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_XPRT_H
#define _LINUX_SUNRPC_XPRT_H

#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ktime.h>
#include <linux/kref.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

#define RPC_MIN_SLOT_TABLE	(2U)
#define RPC_DEF_SLOT_TABLE	(16U)
#define RPC_MAX_SLOT_TABLE_LIMIT	(65536U)
#define RPC_MAX_SLOT_TABLE	RPC_MAX_SLOT_TABLE_LIMIT

#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
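
/*
 * Illustrative sketch (not part of this header's API): congestion
 * accounting is kept in fixed-point units of RPC_CWNDSCALE.  Each
 * congestion-controlled request in flight adds RPC_CWNDSCALE to
 * xprt->cong, while the window xprt->cwnd starts at RPC_INITCWND and
 * is bounded by RPC_MAXCWND().  A caller could therefore test for
 * congestion roughly like this (hypothetical snippet, assuming a
 * valid xprt pointer):
 *
 *	if (RPCXPRT_CONGESTED(xprt)) {
 *		// cong >= cwnd: as many requests are outstanding as the
 *		// current window allows, so further sends must wait.
 *	}
 */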

enum rpc_display_format_t {
	RPC_DISPLAY_ADDR = 0,
	RPC_DISPLAY_PORT,
	RPC_DISPLAY_PROTO,
	RPC_DISPLAY_HEX_ADDR,
	RPC_DISPLAY_HEX_PORT,
	RPC_DISPLAY_NETID,
	RPC_DISPLAY_MAX,
};

struct rpc_task;
struct rpc_xprt;
struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
#include <linux/lwq.h>

/*
 * This describes a complete RPC request
 */
struct rpc_rqst {
	/*
	 * This is the user-visible part
	 */
	struct rpc_xprt *	rq_xprt;	/* RPC client */
	struct xdr_buf		rq_snd_buf;	/* send buffer */
	struct xdr_buf		rq_rcv_buf;	/* recv buffer */

	/*
	 * This is the private part
	 */
	struct rpc_task *	rq_task;	/* RPC task data */
	struct rpc_cred *	rq_cred;	/* Bound cred */
	__be32			rq_xid;		/* request XID */
	int			rq_cong;	/* has incremented xprt->cong */
	u32			rq_seqno;	/* gss seq no. used on req. */
	int			rq_enc_pages_num;
	struct page		**rq_enc_pages;	/* scratch pages for use by
						   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */

	union {
		struct list_head	rq_list;	/* Slot allocation list */
		struct rb_node		rq_recv;	/* Receive queue */
	};

	struct list_head	rq_xmit;	/* Send queue */
	struct list_head	rq_xmit2;	/* Send queue */

	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
	size_t			rq_rcvsize;
	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
	size_t			rq_reply_bytes_recvd;	/* total reply bytes
							 * received */

	struct xdr_buf		rq_private_buf;		/* The receive buffer
							 * used in the softirq.
							 */
	unsigned long		rq_majortimeo;	/* major timeout alarm */
	unsigned long		rq_minortimeo;	/* minor timeout alarm */
	unsigned long		rq_timeout;	/* Current timeout value */
	ktime_t			rq_rtt;		/* round-trip time */
	unsigned int		rq_retries;	/* # of retries */
	unsigned int		rq_connect_cookie;
						/* A cookie used to track the
						   state of the transport
						   connection */
	atomic_t		rq_pin;

	/*
	 * Partial send handling
	 */
	u32			rq_bytes_sent;	/* Bytes we have sent */

	ktime_t			rq_xtime;	/* transmit time stamp */
	int			rq_ntrans;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct lwq_node		rq_bc_list;	/* Callback service list */
	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
#define rq_svec			rq_snd_buf.head
#define rq_slen			rq_snd_buf.len

/* RPC transport layer security policies */
enum xprtsec_policies {
	RPC_XPRTSEC_NONE = 0,
	RPC_XPRTSEC_TLS_ANON,
	RPC_XPRTSEC_TLS_X509,
};

struct xprtsec_parms {
	enum xprtsec_policies	policy;

	/* authentication material */
	key_serial_t		cert_serial;
	key_serial_t		privkey_serial;
};
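
/*
 * Illustrative sketch (hypothetical values, not mandated by this header):
 * a consumer asking for x.509-authenticated TLS would set the policy and
 * supply the keyring serial numbers of the certificate and private key it
 * has already loaded, e.g.:
 *
 *	struct xprtsec_parms xprtsec = {
 *		.policy		= RPC_XPRTSEC_TLS_X509,
 *		.cert_serial	= my_cert_key_serial,
 *		.privkey_serial	= my_privkey_key_serial,
 *	};
 */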

struct rpc_xprt_ops {
	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*free_slot)(struct rpc_xprt *xprt,
				     struct rpc_rqst *req);
	void		(*rpcbind)(struct rpc_task *task);
	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
	int		(*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
				       size_t buflen);
	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
	int		(*buf_alloc)(struct rpc_task *task);
	void		(*buf_free)(struct rpc_task *task);
	int		(*prepare_request)(struct rpc_rqst *req,
					   struct xdr_buf *buf);
	int		(*send_request)(struct rpc_rqst *req);
	void		(*abort_send_request)(struct rpc_rqst *req);
	void		(*wait_for_reply_request)(struct rpc_task *task);
	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_request)(struct rpc_task *task);
	void		(*close)(struct rpc_xprt *xprt);
	void		(*destroy)(struct rpc_xprt *xprt);
	void		(*set_connect_timeout)(struct rpc_xprt *xprt,
					unsigned long connect_timeout,
					unsigned long reconnect_timeout);
	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
	int		(*enable_swap)(struct rpc_xprt *xprt);
	void		(*disable_swap)(struct rpc_xprt *xprt);
	void		(*inject_disconnect)(struct rpc_xprt *xprt);
	int		(*bc_setup)(struct rpc_xprt *xprt,
				    unsigned int min_reqs);
	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
	unsigned int	(*bc_num_slots)(struct rpc_xprt *xprt);
	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
	void		(*bc_destroy)(struct rpc_xprt *xprt,
				      unsigned int max_reqs);
};
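
/*
 * Illustrative sketch (hypothetical transport "foo", not part of this
 * header): a transport implementation provides one, usually const, method
 * table and points rpc_xprt::ops at it from its setup routine, e.g.:
 *
 *	static const struct rpc_xprt_ops foo_xprt_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt,
 *		.release_xprt		= xprt_release_xprt,
 *		.alloc_slot		= xprt_alloc_slot,
 *		.free_slot		= xprt_free_slot,
 *		.connect		= foo_connect,
 *		.send_request		= foo_send_request,
 *		.close			= foo_close,
 *		.destroy		= foo_destroy,
 *	};
 *
 * Generic helpers such as xprt_reserve_xprt() and xprt_alloc_slot()
 * (declared below) can be reused directly where no transport-specific
 * behaviour is needed.
 */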

/*
 * RPC transport identifiers
 *
 * To preserve compatibility with the historical use of raw IP protocol
 * id's for transport selection, UDP and TCP identifiers are specified
 * with the previous values. No such restriction exists for new transports,
 * except that they may not collide with these values (17 and 6,
 * respectively).
 */
#define XPRT_TRANSPORT_BC	(1 << 31)
enum xprt_transports {
	XPRT_TRANSPORT_UDP	= IPPROTO_UDP,
	XPRT_TRANSPORT_TCP	= IPPROTO_TCP,
	XPRT_TRANSPORT_BC_TCP	= IPPROTO_TCP | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_RDMA	= 256,
	XPRT_TRANSPORT_BC_RDMA	= XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_LOCAL	= 257,
	XPRT_TRANSPORT_TCP_TLS	= 258,
};
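
/*
 * Illustrative note: XPRT_TRANSPORT_BC is a flag bit OR'd into the base
 * identifier, so a backchannel variant can be detected or stripped with
 * plain bit operations, e.g. (hypothetical snippet):
 *
 *	if (ident & XPRT_TRANSPORT_BC)
 *		ident &= ~XPRT_TRANSPORT_BC;	// recover the base transport
 */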

struct rpc_sysfs_xprt;
struct rpc_xprt {
	struct kref		kref;		/* Reference count */
	const struct rpc_xprt_ops *ops;		/* transport methods */
	unsigned int		id;		/* transport id */

	const struct rpc_timeout *timeout;	/* timeout parms */
	struct sockaddr_storage	addr;		/* server address */
	size_t			addrlen;	/* size of server address */
	int			prot;		/* IP protocol */

	unsigned long		cong;		/* current congestion */
	unsigned long		cwnd;		/* congestion window */

	size_t			max_payload;	/* largest RPC payload size,
						   in bytes */

	struct rpc_wait_queue	binding;	/* requests waiting on rpcbind */
	struct rpc_wait_queue	sending;	/* requests waiting to send */
	struct rpc_wait_queue	pending;	/* requests in flight */
	struct rpc_wait_queue	backlog;	/* waiting for slot */
	struct list_head	free;		/* free slots */
	unsigned int		max_reqs;	/* max number of slots */
	unsigned int		min_reqs;	/* min number of slots */
	unsigned int		num_reqs;	/* total slots */
	unsigned long		state;		/* transport state */
	unsigned char		resvport : 1,	/* use a reserved port */
				reuseport : 1;	/* reuse port on reconnect */
	atomic_t		swapper;	/* we're swapping over this
						   transport */
	unsigned int		bind_index;	/* bind function index */

	/*
	 * Multipath
	 */
	struct list_head	xprt_switch;

	/*
	 * Connection of transports
	 */
	unsigned long		bind_timeout,
				reestablish_timeout;
	struct xprtsec_parms	xprtsec;
	unsigned int		connect_cookie;	/* A cookie that gets bumped
						   every time the transport
						   is reconnected */

	/*
	 * Disconnection of idle transports
	 */
	struct work_struct	task_cleanup;
	struct timer_list	timer;
	unsigned long		last_used,
				idle_timeout,
				connect_timeout,
				max_reconnect_timeout;

	/*
	 * Send stuff
	 */
	atomic_long_t		queuelen;
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	spinlock_t		queue_lock;	/* send/receive queue lock */
	u32			xid;		/* Next XID value to use */
	struct rpc_task *	snd_task;	/* Task blocked in send */

	struct list_head	xmit_queue;	/* Send queue */
	atomic_long_t		xmit_queuelen;

	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct svc_serv		*bc_serv;	/* The RPC service which will
						 * process the callback */
	unsigned int		bc_alloc_max;
	unsigned int		bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_slot_count;	/* Number of allocated slots */
	spinlock_t		bc_pa_lock;	/* Protects the preallocated
						 * items */
	struct list_head	bc_pa_list;	/* List of preallocated
						 * backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	struct rb_root		recv_queue;	/* Receive queue */

	struct {
		unsigned long		bind_count,	/* total number of binds */
					connect_count,	/* total number of connects */
					connect_start,	/* connect start timestamp */
					connect_time,	/* jiffies waiting for connect */
					sends,		/* how many complete requests */
					recvs,		/* how many complete requests */
					bad_xids,	/* lookup_rqst didn't find XID */
					max_slots;	/* max rpc_slots used */

		unsigned long long	req_u,		/* average requests on the wire */
					bklog_u,	/* backlog queue utilization */
					sending_u,	/* send q utilization */
					pending_u;	/* pend q utilization */
	} stat;

	struct net		*xprt_net;
	netns_tracker		ns_tracker;
	const char		*servername;
	const char		*address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry		*debugfs;	/* debugfs directory */
#endif
	struct rcu_head		rcu;
	const struct xprt_class	*xprt_class;
	struct rpc_sysfs_xprt	*xprt_sysfs;
	bool			main;		/* mark if this is the 1st transport */
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Backchannel flags
 */
#define	RPC_BC_PA_IN_USE	0x0001		/* Preallocated backchannel
						 * buffer in use */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
}
#else
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#define XPRT_CREATE_INFINITE_SLOTS	(1U)
#define XPRT_CREATE_NO_IDLE_TIMEOUT	(1U << 1)

struct xprt_create {
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct net *		net;
	struct sockaddr *	srcaddr;	/* optional local address */
	struct sockaddr *	dstaddr;	/* remote peer address */
	size_t			addrlen;
	const char		*servername;
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
	struct rpc_xprt_switch	*bc_xps;
	unsigned int		flags;
	struct xprtsec_parms	xprtsec;
	unsigned long		connect_timeout;
	unsigned long		reconnect_timeout;
};
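
/*
 * Illustrative sketch (hypothetical variables, not mandated by this header):
 * an upper-layer protocol fills a struct xprt_create on the stack and hands
 * it to xprt_create_transport() (declared below), e.g.:
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= &init_net,
 *		.dstaddr	= (struct sockaddr *)&server_addr,
 *		.addrlen	= sizeof(server_addr),
 *		.servername	= "server.example.org",
 *		.flags		= XPRT_CREATE_NO_IDLE_TIMEOUT,
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 * The returned transport is reference counted; release it with xprt_put().
 */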

struct xprt_class {
	struct list_head	list;
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct rpc_xprt *	(*setup)(struct xprt_create *);
	struct module		*owner;
	char			name[32];
	const char *		netid[];
};
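
/*
 * Illustrative sketch (hypothetical transport "foo"): a transport module
 * typically defines one xprt_class, registers it from its init routine with
 * xprt_register_transport() (declared below), and unregisters it again with
 * xprt_unregister_transport() on module exit:
 *
 *	static struct xprt_class foo_transport = {
 *		.list		= LIST_HEAD_INIT(foo_transport.list),
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.ident		= 259,	// hypothetical; must not collide
 *					// with existing identifiers
 *		.setup		= foo_xprt_setup,
 *		.netid		= { "foo", "" },
 *	};
 */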

/*
 * Generic internal transport functions
 */
struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
void			xprt_connect(struct rpc_task *task);
unsigned long		xprt_reconnect_delay(const struct rpc_xprt *xprt);
void			xprt_reconnect_backoff(struct rpc_xprt *xprt,
					       unsigned long init_to);
void			xprt_reserve(struct rpc_task *task);
void			xprt_retry_reserve(struct rpc_task *task);
int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_free_slot(struct rpc_xprt *xprt,
				       struct rpc_rqst *req);
bool			xprt_prepare_transmit(struct rpc_task *task);
void			xprt_request_enqueue_transmit(struct rpc_task *task);
int			xprt_request_enqueue_receive(struct rpc_task *task);
void			xprt_request_wait_receive(struct rpc_task *task);
void			xprt_request_dequeue_xprt(struct rpc_task *task);
bool			xprt_request_need_retransmit(struct rpc_task *task);
void			xprt_transmit(struct rpc_task *task);
void			xprt_end_transmit(struct rpc_task *task);
int			xprt_adjust_timeout(struct rpc_rqst *req);
void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release(struct rpc_task *task);
struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
void			xprt_put(struct rpc_xprt *xprt);
struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
				   unsigned int num_prealloc,
				   unsigned int max_req);
void			xprt_free(struct rpc_xprt *);
void			xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
bool			xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
void			xprt_cleanup_ids(void);

static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{
	return xprt->ops->enable_swap(xprt);
}

static inline void
xprt_disable_swap(struct rpc_xprt *xprt)
{
	xprt->ops->disable_swap(xprt);
}

/*
 * Transport switch helper functions
 */
int			xprt_register_transport(struct xprt_class *type);
int			xprt_unregister_transport(struct xprt_class *type);
int			xprt_find_transport_ident(const char *);
void			xprt_wait_for_reply_request_def(struct rpc_task *task);
void			xprt_wait_for_reply_request_rtt(struct rpc_task *task);
void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void			xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
bool			xprt_write_space(struct rpc_xprt *xprt);
void			xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void			xprt_update_rtt(struct rpc_task *task);
void			xprt_complete_rqst(struct rpc_task *task, int copied);
void			xprt_pin_rqst(struct rpc_rqst *req);
void			xprt_unpin_rqst(struct rpc_rqst *req);
void			xprt_release_rqst_cong(struct rpc_task *task);
bool			xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
void			xprt_disconnect_done(struct rpc_xprt *xprt);
void			xprt_force_disconnect(struct rpc_xprt *xprt);
void			xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);

bool			xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void			xprt_unlock_connect(struct rpc_xprt *, void *);
void			xprt_release_write(struct rpc_xprt *, struct rpc_task *);

/*
 * Reserved bit positions in xprt->state
 */
#define XPRT_LOCKED		(0)
#define XPRT_CONNECTED		(1)
#define XPRT_CONNECTING		(2)
#define XPRT_CLOSE_WAIT		(3)
#define XPRT_BOUND		(4)
#define XPRT_BINDING		(5)
#define XPRT_CLOSING		(6)
#define XPRT_OFFLINE		(7)
#define XPRT_REMOVE		(8)
#define XPRT_CONGESTED		(9)
#define XPRT_CWND_WAIT		(10)
#define XPRT_WRITE_SPACE	(11)
#define XPRT_SND_IS_COOKIE	(12)

static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connected(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_connected(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
{
	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CONNECTING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_connecting(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTING, &xprt->state);
}

static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}

static inline void xprt_set_bound(struct rpc_xprt *xprt)
{
	test_and_set_bit(XPRT_BOUND, &xprt->state);
}

static inline int xprt_bound(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_bound(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_BINDING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_BINDING, &xprt->state);
}

void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */