/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))

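/*
 * For example, RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1, and
 * RDS_PROTOCOL_MAJOR(0x0301) == 3, RDS_PROTOCOL_MINOR(0x0301) == 1.
 */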

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

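/*
 * ceil() rounds up an integer division, e.g. ceil(8192, 4096) == 2 and
 * ceil(8193, 4096) == 3; for these operands it matches DIV_ROUND_UP().
 */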

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

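/*
 * Sizing sketch: one bit per 16-bit port gives 65536 bits == 8 KiB, so
 * with 4 KiB pages RDS_CONG_MAP_PAGES == 2 and each page covers
 * RDS_CONG_MAP_PAGE_BITS == 32768 ports.  The bit for a port is located
 * roughly like this (see cong.c for the real helpers):
 *
 *	i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	set_bit_le(off, (void *)map->m_page_addrs[i]);
 */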
struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define	RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))

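/*
 * Illustrative use (a sketch, not part of this header's API): picking a
 * path index for a socket, e.g. with n == RDS_MPATH_WORKERS:
 *
 *	int i = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
 *	struct rds_conn_path *cp = &conn->c_path[i];
 *
 * Since the hash depends only on the socket's bound port and its
 * per-socket initval, all messages from one socket map to the same path.
 */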
#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the only control information exchanged is the number of
 * supported paths.  A legacy peer (older kernel revision) replies with a
 * pong that carries no additional control information, which tells the
 * sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))

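/*
 * i.e. RDS_HS_PROBE(RDS_FLAG_PROBE_PORT, 0) and
 * RDS_HS_PROBE(0, RDS_FLAG_PROBE_PORT) are the only true cases: a
 * handshake probe is a ping or pong with exactly one of the two ports set
 * to the probe port and the other set to zero.
 */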
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This holds the RDS_MR_DEAD flag.
	 * It would be nicer and more consistent to fold it into the bit
	 * field above, but we need to use test_and_set_bit().
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}

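/*
 * Example round trip (pure bit manipulation): the r_key lives in the low
 * 32 bits of the cookie and the byte offset in the high 32 bits, so
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 4096);
 *
 * gives rds_rdma_cookie_key(c) == 0x1234 and
 * rds_rdma_cookie_offset(c) == 4096.
 */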

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

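/*
 * These are bit numbers in rm->m_flags, manipulated with the atomic
 * bitops; e.g. a sketch of the pattern used around the send queue:
 *
 *	if (test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 *		...	// still on cp_send_queue / cp_retrans
 *	set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 */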

struct rds_znotifier {
	struct list_head	z_list;
	struct mmpin		z_mmp;
	u32			z_cookie;
};

#define	RDS_ZCOPY_SKB(__skb)	((struct rds_znotifier *)&((__skb)->cb[0]))

static inline struct sk_buff *rds_skb_from_znotifier(struct rds_znotifier *z)
{
	return container_of((void *)z, struct sk_buff, cb);
}

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport -  transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_path_shutdown: conn_path_shutdown stops traffic on the given
 *                 connection path.  Once it returns, the transport must not
 *                 call rds_recv_incoming() for that path.  This is called at
 *                 most once after conn_path_connect has returned success.
 *                 The caller serializes it with the send and connecting paths
 *                 (xmit_* and conn_*).  The transport is responsible for any
 *                 other serialization, including around rds_recv_incoming().
 *                 This is called in process context but should try hard not
 *                 to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};

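/*
 * Illustrative sketch only (not a real transport): how the caller is
 * expected to interpret ->xmit() return values per the contract documented
 * above.
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret > 0)
 *		...	// 'ret' bytes (header + payload) went out
 *	else if (ret == 0)
 *		...	// queue full; transport will reschedule the send
 *	else if (ret == -EAGAIN)
 *		...	// retry the send immediately
 *	else if (ret == -ENOMEM)
 *		...	// back off and retry later
 */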

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects the adjacent members below, up to the blank line */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The core stack sets sk_sndbuf and sk_rcvbuf to twice the value requested
 * via setsockopt() to account for protocol overhead.  RDS does not track
 * that overhead, so these helpers halve the values back and only payload
 * bytes are counted against the requested limit.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

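/*
 * E.g. after setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &(int){65536}, ...)
 * the core stack stores sk_sndbuf == 131072 (subject to sysctl clamping),
 * and rds_sk_sndbuf() reports the original 65536, which RDS compares
 * against queued payload bytes only.
 */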

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
			       struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

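/*
 * Typical use (a sketch of the pattern in the connect/shutdown workers):
 * claim the connecting slot only if the path is still down, so two
 * workers cannot both start a connect attempt.
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		...	// we own the transition; start connecting
 *	else
 *		...	// somebody else changed the state first
 */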

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}

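/*
 * Usage sketch: a sender that wants the header protected stamps the
 * checksum after filling in every field; the receiver checks it before
 * trusting the header.  A zero checksum is treated as "not checksummed"
 * by the verifier, so whether to stamp it is up to the transport.
 *
 *	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, seq);
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);
 *	...
 *	if (!rds_message_verify_checksum(&inc->i_hdr))
 *		...	// drop and bump s_recv_drop_bad_checksum
 */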


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
	       (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)

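/*
 * Example (illustrative; 'copied' is a hypothetical local): bumping
 * counters from a hot path.  get_cpu() pins the CPU only for the
 * duration of the per-cpu update.
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_copy_from_user, copied);
 */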
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif