/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_4_0	0x0400
#define RDS_PROTOCOL_4_1	0x0401
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
#define RDS_PROTOCOL_COMPAT_VERSION	RDS_PROTOCOL_3_1

/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
 * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
 * to ensure compatibility with older RDS modules. Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	struct in6_addr		m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
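
/*
 * Illustrative sketch: the congestion map is one bit per port, 65536
 * bits split across RDS_CONG_MAP_PAGES pages. A port is located by
 * splitting it into a page index and a bit offset within that page,
 * roughly as cong.c does:
 *
 *	static void example_set_cong_bit(struct rds_cong_map *map,
 *					 __be16 port)
 *	{
 *		unsigned long i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *		unsigned long off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *
 *		set_bit_le(off, (void *)map->m_page_addrs[i]);
 *	}
 *
 * example_set_cong_bit() is a hypothetical name; the real entry points
 * are rds_cong_set_bit()/rds_cong_clear_bit(), declared below.
 */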

/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n)	(jhash_1word(ntohs((rs)->rs_bound_port), \
				 (rs)->rs_hash_initval) & ((n) - 1))

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
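
/*
 * Illustrative sketch: because the path count is a power of 2, the
 * jhash result can be reduced with "& (n - 1)" instead of a modulo.
 * A hypothetical caller picking the path for a socket might do:
 *
 *	int idx = RDS_MPATH_HASH(rs, conn->c_npaths);	// 0 .. c_npaths - 1
 *	struct rds_conn_path *cp = &conn->c_path[idx];
 *
 * This assumes conn->c_npaths has already been set by path negotiation
 * (see RDS_EXTHDR_NPATHS below).
 */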

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	struct workqueue_struct	*cp_wq;
	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	struct in6_addr		c_laddr;
	struct in6_addr		c_faddr;
	int			c_dev_if;	/* ifindex used for this conn */
	int			c_bound_if;	/* ifindex of c_laddr */
	unsigned int		c_loopback:1,
				c_isv6:1,
				c_ping_triggered:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_proposed_version;
	unsigned int		c_version;
	possible_net_t		c_net;

	/* TOS */
	u8			c_tos;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq;	/* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
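
/*
 * Illustrative, hypothetical snippet: a probe is recognized purely from
 * the (sport, dport) pair of a header, with the port values taken in
 * host byte order:
 *
 *	if (RDS_HS_PROBE(be16_to_cpu(hdr->h_sport),
 *			 be16_to_cpu(hdr->h_dport)))
 *		;	// handshake ping/pong: look for RDS_EXTHDR_NPATHS
 *
 * The real classification lives in the receive path.
 */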

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * that carries an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR		0
#define RDS_MSG_RX_START	1
#define RDS_MSG_RX_END		2
#define RDS_MSG_RX_CMSG		3

/* The following values are whitelisted for usercopy */
struct rds_inc_usercopy {
	rds_rdma_cookie_t	rdma_cookie;
	ktime_t			rx_tstamp;
};

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	struct in6_addr		i_saddr;

	struct rds_inc_usercopy i_usercopy;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	struct kref		r_kref;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP	0
#define RDS_ATOMIC_TYPE_FADD	1
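
/*
 * Illustrative: an rds_rdma_cookie_t packs the 32-bit R_Key in the low
 * half and the byte offset in the high half, so round-tripping through
 * the helpers above always holds:
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(rkey, off);
 *	WARN_ON(rds_rdma_cookie_key(c) != rkey ||
 *		rds_rdma_cookie_offset(c) != off);
 */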

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;
	union {
		struct rds_znotifier znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_iov_vector {
	struct rds_iovec *iov;
	int               len;
};

struct rds_iov_vector_arr {
	struct rds_iov_vector *vec;
	int                    len;
	int                    indx;
	int                    incr;
};

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	struct in6_addr		m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;

			u64			op_odp_addr;
			struct rds_mr		*op_odp_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};
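
/*
 * Illustrative, hypothetical caller: the RDS_MSG_* values above are bit
 * numbers into rm->m_flags and are manipulated with the usual bitops:
 *
 *	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 *	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 *		;	// still queued on the socket
 *
 * which is what lets each path test list membership without taking both
 * the sock and conn locks, per the comment above the flag definitions.
 */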

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define RDS_TRANS_LOOP	3

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);

	/*
	 * conn_slots_available is invoked when a previously unavailable
	 * connection slot becomes available again. rds_tcp_accept_one_path
	 * may return -ENOBUFS if it cannot find an available slot, in which
	 * case it stashes the new socket in "rds_tcp_accepted_sock". This
	 * callback re-issues rds_tcp_accept_one_path, which picks up the
	 * stashed socket and continues where it left off when it last
	 * returned -ENOBUFS. This ensures messages received on the new
	 * socket are not discarded when no connection path was available
	 * at the time.
	 */
	void (*conn_slots_available)(struct rds_connection *conn);
	int (*conn_path_connect)(struct rds_conn_path *cp);

	/*
	 * conn_shutdown stops traffic on the given connection. Once
	 * it returns the connection can not call rds_recv_incoming().
	 * This will only be called once after conn_connect returns
	 * non-zero success. The caller serializes this with
	 * the send and connecting paths (xmit_* and conn_*). The
	 * transport is responsible for other serialization, including
	 * rds_recv_incoming(). This is called in process context but
	 * should try hard not to block.
	 */
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);

	/*
	 * .xmit is called by rds_send_xmit() to tell the transport to send
	 * part of a message. The caller serializes on the send_sem so this
	 * doesn't need to be reentrant for a given conn. The header must be
	 * sent before the data payload. .xmit must be prepared to send a
	 * message with no data payload. .xmit should return the number of
	 * bytes that were sent down the connection, including header bytes.
	 * Returning 0 tells the caller that it doesn't need to perform any
	 * additional work now. This is usually the case when the transport
	 * has filled the sending queue for its connection and will handle
	 * triggering the rds thread to continue the send when space becomes
	 * available. Returning -EAGAIN tells the caller to retry the send
	 * immediately. Returning -ENOMEM tells the caller to retry the send
	 * at some point in the future.
	 */
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn,
			u64 start, u64 length, int need_odp);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
	u8 (*get_tos_map)(u8 tos);
};

/* Bind hash table key length. It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))
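
/*
 * Illustrative sketch of how a caller is expected to react to .xmit
 * return values; this is a simplified assumption of the real loop in
 * rds_send_xmit() (send.c), not a copy of it:
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret > 0)
 *		;		// advance hdr_off/sg/off by ret bytes
 *	else if (ret == 0)
 *		break;		// transport will reschedule the send itself
 *	else if (ret == -EAGAIN)
 *		continue;	// retry immediately
 *	else if (ret == -ENOMEM)
 *		break;		// back off, retry later via the workqueue
 */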

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u8			rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6	rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr		rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
	u8			rs_tos;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
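
/*
 * Worked example: setsockopt(SOL_SOCKET, SO_SNDBUF) with a value of
 * 65536 makes the socket core store sk_sndbuf = 131072 (doubled for
 * overhead); rds_sk_sndbuf() then reports 65536 again, i.e. the value
 * the application asked for, counted in payload bytes only.
 */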

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
	uint64_t	s_send_stuck_rm;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans,
				       u8 tos, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						u8 tos, gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_check_all_paths(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
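
/*
 * Illustrative, hypothetical caller: because the transition is a
 * compare-and-swap, racing paths can detect that they lost:
 *
 *	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		return;		// another path changed the state first
 */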
static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_path_down(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs,
					  unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data,
			      unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf,
			       unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key,
					u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum ||
		ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
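
/*
 * Illustrative, hypothetical snippet: the checksum covers the whole
 * header with h_csum zeroed first, so verify-after-make always holds,
 * and a zero h_csum is accepted for peers that do not checksum:
 *
 *	rds_message_populate_header(hdr, sport, dport, seq);
 *	rds_message_make_checksum(hdr);
 *	WARN_ON(!rds_message_verify_checksum(hdr));
 */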

/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen);
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct kref *kref);

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}
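
/*
 * Illustrative: paths that (re)arm work typically check
 * rds_destroy_pending() under rcu_read_lock() first, so a netns
 * teardown or module unload cannot race with new work being queued:
 *
 *	rcu_read_lock();
 *	if (!rds_destroy_pending(cp->cp_conn))
 *		queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 0);
 *	rcu_read_unlock();
 */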

enum {
	ODP_NOT_NEEDED,
	ODP_ZEROBASED,
	ODP_VIRTUAL
};

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);

#endif