#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
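
/*
 * Illustrative sketch (not part of the protocol definition): the version
 * word packs the major revision in the high byte and the minor in the
 * low byte, so e.g.
 *
 *	RDS_PROTOCOL(3, 1)		== 0x0301
 *	RDS_PROTOCOL_MAJOR(0x0301)	== 3
 *	RDS_PROTOCOL_MINOR(0x0301)	== 1
 */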

/*
 * XXX randomly chosen, but at least it seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage
 * unprivileged userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(const char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
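
/*
 * Illustrative only: ceil(10, 4) == 3.  As to the XXX above, the kernel
 * does provide an equivalent, DIV_ROUND_UP() in <linux/kernel.h>, which
 * this open-coded macro duplicates.
 */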

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
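
/*
 * Illustrative sketch: with RDS_MPATH_WORKERS == 8, a socket bound to
 * port p is steered to path
 *
 *	cp_index = jhash_1word(p, rs->rs_hash_initval) & 7;
 *
 * so all traffic from one bound port stays on a single path.
 */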

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_destroy_in_prog:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	struct net		*c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return conn->c_net;
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	conn->c_net = get_net(net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths.  A legacy peer (an older kernel revision) returns a
 * pong message without the additional control information, which tells
 * the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		(((sport) == RDS_FLAG_PROBE_PORT && (dport) == 0) || \
		 ((sport) == 0 && (dport) == RDS_FLAG_PROBE_PORT))
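
/*
 * Illustrative only: a probe ping is sent from sport RDS_FLAG_PROBE_PORT
 * to dport 0, and the pong reply swaps the two, so RDS_HS_PROBE() matches
 * the ports of both directions of the handshake.
 */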
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * that follows an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};
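
/*
 * Illustrative sketch (assumes a populated header): extensions are
 * appended with rds_message_add_extension() and walked on receive with
 * rds_message_next_extension(), both declared below, e.g.
 *
 *	struct rds_ext_header_rdma_dest ext = {
 *		.h_rdma_rkey	= cpu_to_be32(rkey),
 *		.h_rdma_offset	= cpu_to_be32(offset),
 *	};
 *	rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST,
 *				  &ext, sizeof(ext));
 */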

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
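
/*
 * Illustrative round trip: the cookie packs <r_key, offset> into 64 bits,
 * key in the low word and offset in the high word:
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(rkey, off);
 *	rds_rdma_cookie_key(c)    == rkey;
 *	rds_rdma_cookie_offset(c) == off;
 */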

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8
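
/*
 * Illustrative only: the RDS_MSG_* values above are bit numbers, meant
 * for the non-atomic-value bitops on rm->m_flags, e.g.
 *
 *	if (test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 *		... the message is still on a connection list ...
 */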

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport -  transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
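
/*
 * Illustrative sketch (not a complete transport): a transport fills in
 * the hooks it supports and registers itself at module init via
 * rds_trans_register(), declared below, e.g.
 *
 *	static struct rds_transport rds_foo_transport = {
 *		.t_name		= "foo",
 *		.laddr_check	= rds_foo_laddr_check,
 *		.xmit		= rds_foo_xmit,
 *		...
 *	};
 *	rds_trans_register(&rds_foo_transport);
 *
 * "foo" and the rds_foo_* hooks are hypothetical names.
 */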

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  RDS counts only payload bytes against these
 * limits, so halve them to recover the value that was actually requested.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
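
/*
 * Illustrative arithmetic: setsockopt(SO_SNDBUF, 1MB) makes the core
 * stack set sk_sndbuf to 2MB; rds_sk_sndbuf() halves that back to the
 * 1MB of payload the caller asked for.
 */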

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
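
/*
 * Illustrative sketch: callers claim a state change with a single
 * compare-and-swap, so exactly one of several racing contexts wins, e.g.
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		... this caller owns the connect attempt ...
 */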

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
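
/*
 * Illustrative round trip: after rds_message_make_checksum(hdr), the
 * whole header checksums to zero, so rds_message_verify_checksum(hdr)
 * returns nonzero; a header with h_csum == 0 is treated as "no checksum"
 * and also passes.
 */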


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += (count);	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
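
/*
 * Illustrative only: callers bump per-cpu counters by member name, e.g.
 *
 *	rds_stats_inc(s_recv_ping);
 *	rds_stats_add(s_copy_to_user, nbytes);
 */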
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif