/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _RDSV3_RDSV3_H
#define	_RDSV3_RDSV3_H

/*
 * The name of this file is rds.h in ofed.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/sunndi.h>
#include <netinet/in.h>
#include <sys/synch.h>
#include <sys/stropts.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <inet/ip.h>
#include <sys/avl.h>
#include <sys/param.h>
#include <sys/rds.h>

#include <sys/ib/ibtl/ibti.h>
#include <sys/ib/clients/of/rdma/ib_verbs.h>
#include <sys/ib/clients/of/rdma/ib_addr.h>
#include <sys/ib/clients/of/rdma/rdma_cm.h>
#include <sys/ib/clients/rdsv3/rdsv3_impl.h>
#include <sys/ib/clients/rdsv3/info.h>

#define	NIPQUAD(addr) \
	(unsigned char)((ntohl(addr) >> 24) & 0xFF), \
	(unsigned char)((ntohl(addr) >> 16) & 0xFF), \
	(unsigned char)((ntohl(addr) >> 8) & 0xFF), \
	(unsigned char)(ntohl(addr) & 0xFF)

/*
 * RDS Network protocol version
 */
#define	RDS_PROTOCOL_3_0	0x0300
#define	RDS_PROTOCOL_3_1	0x0301
#define	RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define	RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define	RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define	RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
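
/*
 * Worked example of the version macros above (illustrative only):
 * RDS_PROTOCOL(3, 1) packs major 3 and minor 1 into 0x0301, so
 * RDS_PROTOCOL_VERSION == RDS_PROTOCOL_3_1, and conversely
 * RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1.
 * A connection that negotiates down to 3.0 would carry
 * RDS_PROTOCOL(3, 0) == 0x0300 in its c_version field.
 */
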
/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 *
 * port 18633 was the version that had ack frames on the wire.
 */
#define	RDSV3_PORT	18634

/*
 * This is unfortunate.  Some kernels have a bug in the per_cpu() API which
 * makes DEFINE_PER_CPU trigger an oops on insmod because the per-cpu section
 * in the module is not cacheline-aligned.  As much as we'd like to tell users
 * with older kernels to stuff it, that's not reasonable.  We'll roll our own
 * until this doesn't have to build against older kernels.
 */
#define	RDSV3_DEFINE_PER_CPU(type, var)  type	var[NR_CPUS]
#define	RDSV3_DECLARE_PER_CPU(type, var)  extern type	var[NR_CPUS]
#define	rdsv3_per_cpu(var, cpu)  var[cpu]

static inline ulong_t
ceil(ulong_t x, ulong_t y)
{
	return ((x + y - 1) / y);
}

#define	RDSV3_FRAG_SHIFT	12
#define	RDSV3_FRAG_SIZE		((unsigned int)(1 << RDSV3_FRAG_SHIFT))

#define	RDSV3_CONG_MAP_BYTES	(65536 / 8)
#define	RDSV3_CONG_MAP_LONGS	(RDSV3_CONG_MAP_BYTES / sizeof (unsigned long))
#define	RDSV3_CONG_MAP_PAGES	(RDSV3_CONG_MAP_BYTES / PAGE_SIZE)
#define	RDSV3_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rdsv3_cong_map {
	struct avl_node		m_rb_node;
	uint32_be_t		m_addr;
	rdsv3_wait_queue_t	m_waitq;
	struct list		m_conn_list;
	unsigned long		m_page_addrs[RDSV3_CONG_MAP_PAGES];
};
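
/*
 * Sizing note (a worked example, assuming 4K pages): the congestion map
 * dedicates one bit to each of the 65536 possible RDS ports, so
 * RDSV3_CONG_MAP_BYTES is 65536 / 8 = 8192 bytes.  With a PAGE_SIZE of
 * 4096 that is RDSV3_CONG_MAP_PAGES = 2 pages, each holding
 * RDSV3_CONG_MAP_PAGE_BITS = 32768 bits of m_page_addrs.  The
 * rdsv3_cong_set_bit()/rdsv3_cong_clear_bit() helpers declared below
 * flip the bit for a single port.
 */
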
/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDSV3_CONN_DOWN = 0,
	RDSV3_CONN_CONNECTING,
	RDSV3_CONN_DISCONNECTING,
	RDSV3_CONN_UP,
	RDSV3_CONN_ERROR,
};

/* Bits for c_flags */
#define	RDSV3_LL_SEND_FULL	0
#define	RDSV3_RECONNECT_PENDING	1

struct rdsv3_connection {
	struct avl_node		c_hash_node;
	uint32_be_t		c_laddr;
	uint32_be_t		c_faddr;
	unsigned int		c_loopback:1;
	struct rdsv3_connection *c_passive;

	struct rdsv3_cong_map	*c_lcong;
	struct rdsv3_cong_map	*c_fcong;

	struct mutex		c_send_lock;	/* protect send ring */
	struct rdsv3_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_rdma_sent;

	kmutex_t		c_lock;		/* protect msg queues */
	uint64_t		c_next_tx_seq;
	struct list		c_send_queue;
	struct list		c_retrans;

	uint64_t		c_next_rx_seq;

	struct rdsv3_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct rdsv3_delayed_work_s	c_send_w;
	struct rdsv3_delayed_work_s	c_recv_w;
	struct rdsv3_delayed_work_s	c_conn_w;
	struct rdsv3_work_s	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */

	struct list_node	c_map_item;
	unsigned long		c_map_queued;
	unsigned long		c_map_offset;
	unsigned long		c_map_bytes;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
};

#define	RDSV3_FLAG_CONG_BITMAP		0x01
#define	RDSV3_FLAG_ACK_REQUIRED		0x02
#define	RDSV3_FLAG_RETRANSMITTED	0x04
#define	RDSV3_MAX_ADV_CREDIT		127

/*
 * Maximum space available for extension headers.
 */
#define	RDSV3_HEADER_EXT_SPACE	16

struct rdsv3_header {
	uint64_be_t	h_sequence;
	uint64_be_t	h_ack;
	uint32_be_t	h_len;
	uint16_be_t	h_sport;
	uint16_be_t	h_dport;
	uint8_t		h_flags;
	uint8_t		h_credit;
	uint8_t		h_padding[4];
	uint16_be_t	h_csum;

	uint8_t		h_exthdr[RDSV3_HEADER_EXT_SPACE];
};

/* Reserved - indicates end of extensions */
#define	RDSV3_EXTHDR_NONE	0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level.  This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 */
#define	RDSV3_EXTHDR_VERSION	1
struct rdsv3_ext_header_version {
	uint32_be_t	h_version;
};

/*
 * This extension header is included in the RDS message
 * that follows an RDMA operation.
 */
#define	RDSV3_EXTHDR_RDMA	2
struct rdsv3_ext_header_rdma {
	uint32_be_t	h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define	RDSV3_EXTHDR_RDMA_DEST	3
struct rdsv3_ext_header_rdma_dest {
	uint32_be_t	h_rdma_rkey;
	uint32_be_t	h_rdma_offset;
};

#define	__RDSV3_EXTHDR_MAX	16 /* for now */
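
/*
 * Illustrative sketch of how an extension is attached to a wire header
 * (hypothetical fragment; the real helpers such as
 * rdsv3_message_add_rdma_dest_extension() are declared in the message.c
 * section near the end of this header):
 *
 *	struct rdsv3_ext_header_rdma_dest ext;
 *
 *	ext.h_rdma_rkey = htonl(r_key);
 *	ext.h_rdma_offset = htonl(offset);
 *	(void) rdsv3_message_add_extension(hdr, RDSV3_EXTHDR_RDMA_DEST,
 *	    &ext, sizeof (ext));
 *
 * Here "hdr" is the message's struct rdsv3_header; the extension is copied
 * into h_exthdr, and the remaining space reads back as RDSV3_EXTHDR_NONE
 * (end of extensions).
 */
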
struct rdsv3_incoming {
	atomic_t		i_refcount;
	struct list_node	i_item;
	struct rdsv3_connection *i_conn;
	struct rdsv3_header	i_hdr;
	unsigned long		i_rx_jiffies;
	uint32_be_t		i_saddr;

	rdsv3_rdma_cookie_t	i_rdma_cookie;
};

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDSV3_MSG_ON_SOCK and RDSV3_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list node directly.  That means each path can use
 * the message's list node to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rdsv3_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define	RDSV3_MSG_ON_SOCK		1
#define	RDSV3_MSG_ON_CONN		2
#define	RDSV3_MSG_HAS_ACK_SEQ		3
#define	RDSV3_MSG_ACK_REQUIRED		4
#define	RDSV3_MSG_RETRANSMITTED		5
#define	RDSV3_MSG_MAPPED		6
#define	RDSV3_MSG_PAGEVEC		7

struct rdsv3_message {
	atomic_t		m_refcount;
	struct list_node	m_sock_item;
	struct list_node	m_conn_item;
	struct rdsv3_incoming	m_inc;
	uint64_t		m_ack_seq;
	uint32_be_t		m_daddr;
	unsigned long		m_flags;

	/*
	 * Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	kmutex_t		m_rs_lock;
	struct rdsv3_sock	*m_rs;
	struct rdsv3_rdma_op	*m_rdma_op;
	rdsv3_rdma_cookie_t	m_rdma_cookie;
	struct rdsv3_mr		*m_rdma_mr;
	unsigned int		m_nents;
	unsigned int		m_count;
	struct rdsv3_scatterlist	m_sg[1];
};
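
/*
 * Illustrative sketch of the RDSV3_MSG_ON_CONN pattern described above
 * (hypothetical fragment, not the actual send/cancel code; it assumes the
 * bit helpers provided by rdsv3_impl.h):
 *
 *	mutex_enter(&conn->c_lock);
 *	if (test_and_clear_bit(RDSV3_MSG_ON_CONN, &rm->m_flags))
 *		list_remove(&conn->c_retrans, rm);
 *	mutex_exit(&conn->c_lock);
 *
 * Because the flag, rather than the list node, records whether the message
 * is still on the connection, another path can meanwhile use m_conn_item
 * to hold the message on a private list without the two paths confusing
 * each other.
 */
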
/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations.  Rather than keeping the whole RDS message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list.  Notifications are delivered to the application
 * through control messages.
 */
struct rdsv3_notifier {
	list_node_t	n_list;
	uint64_t	n_user_token;
	int		n_status;
};

/*
 * struct rdsv3_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rdsv3_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport
 *        has filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send
 *        at some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *        it returns the connection can not call rdsv3_recv_incoming().
 *        This will only be called once after conn_connect returns
 *        non-zero success.  The caller serializes this with the send and
 *        connecting paths (xmit_* and conn_*).  The transport is
 *        responsible for other serialization, including
 *        rdsv3_recv_incoming().  This is called in process context but
 *        should try hard not to block.
 *
 * @xmit_cong_map: This asks the transport to send the local bitmap down the
 *        given connection.  XXX get a better story about the bitmap
 *        flag and header.
 */

#define	RDS_TRANS_IB	0
#define	RDS_TRANS_IWARP	1
#define	RDS_TRANS_TCP	2
#define	RDS_TRANS_COUNT	3

struct rdsv3_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_node	t_item;
	unsigned int		t_type;
	unsigned int		t_prefer_loopback:1;

	int (*laddr_check)(uint32_be_t addr);
	int (*conn_alloc)(struct rdsv3_connection *conn, int gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rdsv3_connection *conn);
	void (*conn_shutdown)(struct rdsv3_connection *conn);
	void (*xmit_prepare)(struct rdsv3_connection *conn);
	void (*xmit_complete)(struct rdsv3_connection *conn);
	int (*xmit)(struct rdsv3_connection *conn, struct rdsv3_message *rm,
	    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_cong_map)(struct rdsv3_connection *conn,
	    struct rdsv3_cong_map *map, unsigned long offset);
	int (*xmit_rdma)(struct rdsv3_connection *conn,
	    struct rdsv3_rdma_op *op);
	int (*recv)(struct rdsv3_connection *conn);
	int (*inc_copy_to_user)(struct rdsv3_incoming *inc, uio_t *uio,
	    size_t size);
	void (*inc_purge)(struct rdsv3_incoming *inc);
	void (*inc_free)(struct rdsv3_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
	    struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rdsv3_connection *conn,
	    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rdsv3_info_iterator *iter,
	    unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct rdsv3_iovec *sg, unsigned long nr_sg,
	    struct rdsv3_sock *rs, uint32_t *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
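
/*
 * A transport typically provides a statically initialized hook table and
 * registers it when it attaches.  Minimal sketch (the "example_*" names
 * are hypothetical; the real IB transport wires up many more hooks):
 *
 *	static struct rdsv3_transport rdsv3_example_transport = {
 *		.t_name = "example",
 *		.t_type = RDS_TRANS_IB,
 *		.laddr_check = example_laddr_check,
 *		.conn_alloc = example_conn_alloc,
 *		.conn_free = example_conn_free,
 *		.conn_connect = example_conn_connect,
 *		.conn_shutdown = example_conn_shutdown,
 *		.xmit = example_xmit,
 *		.recv = example_recv,
 *		.inc_copy_to_user = example_inc_copy_to_user,
 *		.inc_free = example_inc_free,
 *	};
 *
 *	(void) rdsv3_trans_register(&rdsv3_example_transport);
 *
 * rdsv3_trans_register() and rdsv3_trans_get_preferred() are declared in
 * the transport.c section below.
 */
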
struct rdsv3_sock {
	struct rsock		*rs_sk;
	uint64_t		rs_user_addr;
	uint64_t		rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct avl_node		rs_bound_node;
	uint32_be_t		rs_bound_addr;
	uint32_be_t		rs_conn_addr;
	uint16_be_t		rs_bound_port;
	uint16_be_t		rs_conn_port;

	/*
	 * This is only used to communicate the transport between bind and
	 * initiating connections.  All other transport use is referenced
	 * through the connection.
	 */
	struct rdsv3_transport	*rs_transport;

	/*
	 * rdsv3_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rdsv3_connection	*rs_conn;
	kmutex_t		rs_conn_lock;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects the adjacent members below, down to the blank line */
	kmutex_t		rs_lock;
	struct list		rs_send_queue;
	uint32_t		rs_snd_bytes;
	int			rs_rcv_bytes;
	/* currently used for failed RDMAs */
	struct list		rs_notify_queue;

	/*
	 * Congestion wakeup.  If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_node	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rdsv3_release.
	 */
	krwlock_t		rs_recv_lock;
	struct list		rs_recv_queue;

	/* just for stats reporting */
	struct list_node	rs_item;

	/* these have their own lock */
	kmutex_t		rs_rdma_lock;
	struct avl_tree		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;

	cred_t			*rs_cred;
	zoneid_t		rs_zoneid;
};

static inline struct rdsv3_sock *
rdsv3_sk_to_rs(const struct rsock *sk)
{
	return ((struct rdsv3_sock *)sk->sk_protinfo);
}

static inline struct rsock *
rdsv3_rs_to_sk(const struct rdsv3_sock *rs)
{
	return ((struct rsock *)rs->rs_sk);
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int
rdsv3_sk_sndbuf(struct rdsv3_sock *rs)
{
	/* XXX */
	return (rdsv3_rs_to_sk(rs)->sk_sndbuf);
}

static inline int
rdsv3_sk_rcvbuf(struct rdsv3_sock *rs)
{
	/* XXX */
	return (rdsv3_rs_to_sk(rs)->sk_rcvbuf);
}

struct rdsv3_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_sem_contention;
	uint64_t	s_send_sem_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rdsv3_sock_addref(struct rdsv3_sock *rs);
void rdsv3_sock_put(struct rdsv3_sock *rs);
void rdsv3_wake_sk_sleep(struct rdsv3_sock *rs);
void __rdsv3_wake_sk_sleep(struct rsock *sk);

extern rdsv3_wait_queue_t rdsv3_poll_waitq;

/* bind.c */
int rdsv3_bind(sock_lower_handle_t proto_handle, struct sockaddr *sa,
    socklen_t len, cred_t *cr);
void rdsv3_remove_bound(struct rdsv3_sock *rs);
struct rdsv3_sock *rdsv3_find_bound(uint32_be_t addr, uint16_be_t port);

/* conn.c */
int rdsv3_conn_init(void);
void rdsv3_conn_exit(void);
struct rdsv3_connection *rdsv3_conn_create(uint32_be_t laddr,
    uint32_be_t faddr, struct rdsv3_transport *trans, int gfp);
struct rdsv3_connection *rdsv3_conn_create_outgoing(uint32_be_t laddr,
    uint32_be_t faddr, struct rdsv3_transport *trans, int gfp);
void rdsv3_conn_destroy(struct rdsv3_connection *conn);
void rdsv3_conn_reset(struct rdsv3_connection *conn);
void rdsv3_conn_drop(struct rdsv3_connection *conn);
void rdsv3_for_each_conn_info(struct rsock *sock, unsigned int len,
    struct rdsv3_info_iterator *iter,
    struct rdsv3_info_lengths *lens,
    int (*visitor)(struct rdsv3_connection *, void *),
    size_t item_len);

static inline int
rdsv3_conn_transition(struct rdsv3_connection *conn, int old, int new)
{
	return (atomic_cmpxchg(&conn->c_state, old, new) == old);
}

static inline int
rdsv3_conn_state(struct rdsv3_connection *conn)
{
	return (atomic_get(&conn->c_state));
}

static inline int
rdsv3_conn_up(struct rdsv3_connection *conn)
{
	return (atomic_get(&conn->c_state) == RDSV3_CONN_UP);
}

static inline int
rdsv3_conn_connecting(struct rdsv3_connection *conn)
{
	return (atomic_get(&conn->c_state) == RDSV3_CONN_CONNECTING);
}
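
/*
 * The helpers above implement the atomic state machine described next to
 * the RDSV3_CONN_* enum.  Illustrative fragment (hypothetical, not the
 * actual worker code):
 *
 *	if (rdsv3_conn_transition(conn, RDSV3_CONN_DOWN,
 *	    RDSV3_CONN_CONNECTING)) {
 *		... we won the transition; start connecting via c_trans ...
 *	} else if (rdsv3_conn_up(conn)) {
 *		... someone else already brought the connection up ...
 *	}
 *
 * The compare-and-swap in rdsv3_conn_transition() ensures only one caller
 * wins a given transition, so concurrent workers cannot race a connection
 * into conflicting states.
 */
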
/* recv.c */
void rdsv3_inc_init(struct rdsv3_incoming *inc, struct rdsv3_connection *conn,
    uint32_be_t saddr);
void rdsv3_inc_addref(struct rdsv3_incoming *inc);
void rdsv3_inc_put(struct rdsv3_incoming *inc);
void rdsv3_recv_incoming(struct rdsv3_connection *conn, uint32_be_t saddr,
    uint32_be_t daddr, struct rdsv3_incoming *inc, int gfp);
int rdsv3_recvmsg(struct rdsv3_sock *rs, uio_t *uio,
    struct msghdr *msg, size_t size, int msg_flags);
void rdsv3_clear_recv_queue(struct rdsv3_sock *rs);
int rdsv3_notify_queue_get(struct rdsv3_sock *rs, struct msghdr *msg);
void rdsv3_inc_info_copy(struct rdsv3_incoming *inc,
    struct rdsv3_info_iterator *iter,
    uint32_be_t saddr, uint32_be_t daddr, int flip);

/* page.c */
int rdsv3_page_remainder_alloc(struct rdsv3_scatterlist *scat,
    unsigned long bytes, int gfp);

/* send.c */
int rdsv3_sendmsg(struct rdsv3_sock *rs, uio_t *uio, struct nmsghdr *msg,
    size_t payload_len);
void rdsv3_send_reset(struct rdsv3_connection *conn);
int rdsv3_send_xmit(struct rdsv3_connection *conn);
struct sockaddr_in;
void rdsv3_send_drop_to(struct rdsv3_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rdsv3_message *rm, uint64_t ack);
void rdsv3_send_drop_acked(struct rdsv3_connection *conn, uint64_t ack,
    is_acked_func is_acked);
int rdsv3_send_acked_before(struct rdsv3_connection *conn, uint64_t seq);
void rdsv3_send_remove_from_sock(struct list *messages, int status);
int rdsv3_send_pong(struct rdsv3_connection *conn, uint16_be_t dport);
struct rdsv3_message *rdsv3_send_get_message(struct rdsv3_connection *,
    struct rdsv3_rdma_op *);

/* rdma.c */
void rdsv3_rdma_unuse(struct rdsv3_sock *rs, uint32_t r_key, int force);

/* cong.c */
void rdsv3_cong_init(void);
int rdsv3_cong_get_maps(struct rdsv3_connection *conn);
void rdsv3_cong_add_conn(struct rdsv3_connection *conn);
void rdsv3_cong_remove_conn(struct rdsv3_connection *conn);
void rdsv3_cong_set_bit(struct rdsv3_cong_map *map, uint16_be_t port);
void rdsv3_cong_clear_bit(struct rdsv3_cong_map *map, uint16_be_t port);
int rdsv3_cong_wait(struct rdsv3_cong_map *map, uint16_be_t port, int nonblock,
    struct rdsv3_sock *rs);
void rdsv3_cong_queue_updates(struct rdsv3_cong_map *map);
void rdsv3_cong_map_updated(struct rdsv3_cong_map *map, uint64_t);
int rdsv3_cong_updated_since(unsigned long *recent);
void rdsv3_cong_add_socket(struct rdsv3_sock *);
void rdsv3_cong_remove_socket(struct rdsv3_sock *);
void rdsv3_cong_exit(void);
struct rdsv3_message *rdsv3_cong_update_alloc(struct rdsv3_connection *conn);

/* stats.c */
RDSV3_DECLARE_PER_CPU(struct rdsv3_statistics, rdsv3_stats);
#define	rdsv3_stats_inc_which(which, member) do {		\
	rdsv3_per_cpu(which, get_cpu()).member++;		\
	put_cpu();						\
} while (0)
#define	rdsv3_stats_inc(member)	rdsv3_stats_inc_which(rdsv3_stats, member)
#define	rdsv3_stats_add_which(which, member, count) do {	\
	rdsv3_per_cpu(which, get_cpu()).member += count;	\
	put_cpu();						\
} while (0)
#define	rdsv3_stats_add(member, count)	\
	rdsv3_stats_add_which(rdsv3_stats, member, count)
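
/*
 * Usage sketch (illustrative; the real call sites are spread through
 * recv.c, send.c and friends).  Any member of struct rdsv3_statistics can
 * be named directly, since the macros expand to a field access on this
 * CPU's slot of the per-CPU rdsv3_stats array:
 *
 *	rdsv3_stats_inc(s_recv_delivered);
 *	rdsv3_stats_add(s_copy_to_user, nbytes);
 *
 * The per-CPU counters are aggregated later when statistics are reported
 * (see rdsv3_stats_info_copy() below).
 */
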
int rdsv3_stats_init(void);
void rdsv3_stats_exit(void);
void rdsv3_stats_info_copy(struct rdsv3_info_iterator *iter,
    uint64_t *values, char **names, size_t nr);

/* sysctl.c */
int rdsv3_sysctl_init(void);
void rdsv3_sysctl_exit(void);
extern unsigned long rdsv3_sysctl_sndbuf_min;
extern unsigned long rdsv3_sysctl_sndbuf_default;
extern unsigned long rdsv3_sysctl_sndbuf_max;
extern unsigned long rdsv3_sysctl_reconnect_min_jiffies;
extern unsigned long rdsv3_sysctl_reconnect_max_jiffies;
extern unsigned int rdsv3_sysctl_max_unacked_packets;
extern unsigned int rdsv3_sysctl_max_unacked_bytes;
extern unsigned int rdsv3_sysctl_ping_enable;
extern unsigned long rdsv3_sysctl_trace_flags;
extern unsigned int rdsv3_sysctl_trace_level;

/* threads.c */
int rdsv3_threads_init(void);
void rdsv3_threads_exit(void);
extern struct rdsv3_workqueue_struct_s *rdsv3_wq;
void rdsv3_connect_worker(struct rdsv3_work_s *);
void rdsv3_shutdown_worker(struct rdsv3_work_s *);
void rdsv3_send_worker(struct rdsv3_work_s *);
void rdsv3_recv_worker(struct rdsv3_work_s *);
void rdsv3_connect_complete(struct rdsv3_connection *conn);

/* transport.c */
int rdsv3_trans_register(struct rdsv3_transport *trans);
void rdsv3_trans_unregister(struct rdsv3_transport *trans);
struct rdsv3_transport *rdsv3_trans_get_preferred(uint32_be_t addr);
unsigned int rdsv3_trans_stats_info_copy(struct rdsv3_info_iterator *iter,
    unsigned int avail);
void rdsv3_trans_exit(void);

/* message.c */
struct rdsv3_message *rdsv3_message_alloc(unsigned int nents, int gfp);
struct rdsv3_message *rdsv3_message_copy_from_user(struct uio *uiop,
    size_t total_len);
struct rdsv3_message *rdsv3_message_map_pages(unsigned long *page_addrs,
    unsigned int total_len);
void rdsv3_message_populate_header(struct rdsv3_header *hdr, uint16_be_t sport,
    uint16_be_t dport, uint64_t seq);
int rdsv3_message_add_extension(struct rdsv3_header *hdr,
    unsigned int type, const void *data, unsigned int len);
int rdsv3_message_next_extension(struct rdsv3_header *hdr,
    unsigned int *pos, void *buf, unsigned int *buflen);
int rdsv3_message_add_version_extension(struct rdsv3_header *hdr,
    unsigned int version);
int rdsv3_message_get_version_extension(struct rdsv3_header *hdr,
    unsigned int *version);
int rdsv3_message_add_rdma_dest_extension(struct rdsv3_header *hdr,
    uint32_t r_key, uint32_t offset);
int rdsv3_message_inc_copy_to_user(struct rdsv3_incoming *inc,
    uio_t *uio, size_t size);
void rdsv3_message_inc_purge(struct rdsv3_incoming *inc);
void rdsv3_message_inc_free(struct rdsv3_incoming *inc);
void rdsv3_message_addref(struct rdsv3_message *rm);
void rdsv3_message_put(struct rdsv3_message *rm);
void rdsv3_message_wait(struct rdsv3_message *rm);
void rdsv3_message_unmapped(struct rdsv3_message *rm);

static inline void
rdsv3_message_make_checksum(struct rdsv3_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum =
	    rdsv3_ip_fast_csum((void *)hdr, sizeof (*hdr) >> 2);
}

static inline int
rdsv3_message_verify_checksum(const struct rdsv3_header *hdr)
{
	return (!hdr->h_csum ||
	    rdsv3_ip_fast_csum((void *)hdr, sizeof (*hdr) >> 2) == 0);
}
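
/*
 * Illustrative flow (hypothetical fragment): the sender stamps the header
 * once it is fully populated, and the receiver drops anything that fails
 * verification.
 *
 *	rdsv3_message_populate_header(hdr, sport, dport, seq);
 *	rdsv3_message_make_checksum(hdr);
 *	...
 *	if (!rdsv3_message_verify_checksum(hdr)) {
 *		rdsv3_stats_inc(s_recv_drop_bad_checksum);
 *		return;
 *	}
 *
 * Note that rdsv3_message_verify_checksum() treats a zero checksum as
 * valid, so peers that do not checksum their headers still interoperate.
 */
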
/* rdsv3_sc.c */
extern boolean_t rdsv3_if_lookup_by_name(char *if_name);
extern int rdsv3_sc_path_lookup(ipaddr_t *localip, ipaddr_t *remip);
extern ipaddr_t rdsv3_scaddr_to_ibaddr(ipaddr_t addr);

#ifdef __cplusplus
}
#endif

#endif /* _RDSV3_RDSV3_H */