Lines matching "rs" in net/rds/af_rds.c (identifier search query: +full:rs +full:-)
61 struct sock *sk = sock->sk; in rds_release()
62 struct rds_sock *rs; in rds_release() local
67 rs = rds_sk_to_rs(sk); in rds_release()
70 /* Note - rds_clear_recv_queue grabs rs_recv_lock, so in rds_release()
73 rds_clear_recv_queue(rs); in rds_release()
74 rds_cong_remove_socket(rs); in rds_release()
76 rds_remove_bound(rs); in rds_release()
78 rds_send_drop_to(rs, NULL); in rds_release()
79 rds_rdma_drop_keys(rs); in rds_release()
80 rds_notify_queue_get(rs, NULL); in rds_release()
81 rds_notify_msg_zcopy_purge(&rs->rs_zcookie_queue); in rds_release()
84 list_del_init(&rs->rs_item); in rds_release()
85 rds_sock_count--; in rds_release()
88 rds_trans_put(rs->rs_transport); in rds_release()
90 sock->sk = NULL; in rds_release()
97 * Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
101 * NB - normally, one would use sk_callback_lock for this, but we can
103 * with _lock_bh only - so relying on sk_callback_lock introduces livelocks.
105 void rds_wake_sk_sleep(struct rds_sock *rs) in rds_wake_sk_sleep() argument
109 read_lock_irqsave(&rs->rs_recv_lock, flags); in rds_wake_sk_sleep()
110 __rds_wake_sk_sleep(rds_rs_to_sk(rs)); in rds_wake_sk_sleep()
111 read_unlock_irqrestore(&rs->rs_recv_lock, flags); in rds_wake_sk_sleep()
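The __rds_wake_sk_sleep() helper called under the lock is not itself among the matched lines; a minimal sketch of its assumed shape (wake the socket wait queue while the caller's rs_recv_lock keeps rds_release() -> sock_orphan() from clearing sk_sleep underneath it):

/* Sketch only -- assumed shape, not a verbatim copy of the helper. */
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}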
117 struct rds_sock *rs = rds_sk_to_rs(sock->sk); in rds_getname() local
124 if (ipv6_addr_any(&rs->rs_conn_addr)) in rds_getname()
125 return -ENOTCONN; in rds_getname()
127 if (ipv6_addr_v4mapped(&rs->rs_conn_addr)) { in rds_getname()
129 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); in rds_getname()
130 sin->sin_family = AF_INET; in rds_getname()
131 sin->sin_port = rs->rs_conn_port; in rds_getname()
132 sin->sin_addr.s_addr = rs->rs_conn_addr_v4; in rds_getname()
136 sin6->sin6_family = AF_INET6; in rds_getname()
137 sin6->sin6_port = rs->rs_conn_port; in rds_getname()
138 sin6->sin6_addr = rs->rs_conn_addr; in rds_getname()
139 sin6->sin6_flowinfo = 0; in rds_getname()
141 sin6->sin6_scope_id = rs->rs_bound_scope_id; in rds_getname()
151 if (ipv6_addr_any(&rs->rs_bound_addr)) { in rds_getname()
152 if (ipv6_addr_any(&rs->rs_conn_addr)) { in rds_getname()
155 sin->sin_family = AF_UNSPEC; in rds_getname()
160 if (!(ipv6_addr_type(&rs->rs_conn_addr) & in rds_getname()
164 sin6->sin6_family = AF_INET6; in rds_getname()
171 sin->sin_family = AF_INET; in rds_getname()
174 if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) { in rds_getname()
176 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); in rds_getname()
177 sin->sin_family = AF_INET; in rds_getname()
178 sin->sin_port = rs->rs_bound_port; in rds_getname()
179 sin->sin_addr.s_addr = rs->rs_bound_addr_v4; in rds_getname()
183 sin6->sin6_family = AF_INET6; in rds_getname()
184 sin6->sin6_port = rs->rs_bound_port; in rds_getname()
185 sin6->sin6_addr = rs->rs_bound_addr; in rds_getname()
186 sin6->sin6_flowinfo = 0; in rds_getname()
187 sin6->sin6_scope_id = rs->rs_bound_scope_id; in rds_getname()
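As the two branches above show, rds_getname() reports either an AF_INET or an AF_INET6 name depending on how the socket was bound or connected. A hedged userspace sketch (print_rds_name() and fd are illustrative, not from this file) that copes with both:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Print the local name of an RDS socket; size the buffer as
 * sockaddr_storage because the kernel may fill in either family. */
static void print_rds_name(int fd)
{
	struct sockaddr_storage ss;
	socklen_t len = sizeof(ss);
	char buf[INET6_ADDRSTRLEN];

	if (getsockname(fd, (struct sockaddr *)&ss, &len))
		return;

	if (ss.ss_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

		inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
		printf("bound to %s port %u\n", buf, (unsigned)ntohs(sin->sin_port));
	} else if (ss.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;

		inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
		printf("bound to %s port %u\n", buf, (unsigned)ntohs(sin6->sin6_port));
	}
}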
201 * - there is data on the receive queue.
202 * - to signal that a previously congested destination may have become
204 * - A notification has been queued to the socket (this can be a congestion
215 struct sock *sk = sock->sk; in rds_poll()
216 struct rds_sock *rs = rds_sk_to_rs(sk); in rds_poll() local
222 if (rs->rs_seen_congestion) in rds_poll()
225 read_lock_irqsave(&rs->rs_recv_lock, flags); in rds_poll()
226 if (!rs->rs_cong_monitor) { in rds_poll()
230 if (rds_cong_updated_since(&rs->rs_cong_track)) in rds_poll()
233 spin_lock(&rs->rs_lock); in rds_poll()
234 if (rs->rs_cong_notify) in rds_poll()
236 spin_unlock(&rs->rs_lock); in rds_poll()
238 if (!list_empty(&rs->rs_recv_queue) || in rds_poll()
239 !list_empty(&rs->rs_notify_queue) || in rds_poll()
240 !list_empty(&rs->rs_zcookie_queue.zcookie_head)) in rds_poll()
242 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) in rds_poll()
244 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in rds_poll()
246 read_unlock_irqrestore(&rs->rs_recv_lock, flags); in rds_poll()
248 /* clear state any time we wake a seen-congested socket */ in rds_poll()
250 rs->rs_seen_congestion = 0; in rds_poll()
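The comment above lists the events rds_poll() reports: queued data or notifications select the socket readable, free send space selects it writable. A hedged userspace sketch of the caller side, assuming fd is an already set-up RDS socket:

#include <poll.h>

/* Block for up to timeout_ms waiting for the RDS socket to become
 * readable and/or writable; a return value >0 means check pfd.revents. */
static int wait_for_rds_io(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = POLLIN | POLLOUT,
	};

	return poll(&pfd, 1, timeout_ms);
}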
257 struct rds_sock *rs = rds_sk_to_rs(sock->sk); in rds_ioctl() local
263 return -EFAULT; in rds_ioctl()
265 if (rs->rs_transport && in rds_ioctl()
266 rs->rs_transport->get_tos_map) in rds_ioctl()
267 tos = rs->rs_transport->get_tos_map(utos); in rds_ioctl()
269 return -ENOIOCTLCMD; in rds_ioctl()
272 if (rs->rs_tos || rs->rs_conn) { in rds_ioctl()
274 return -EINVAL; in rds_ioctl()
276 rs->rs_tos = tos; in rds_ioctl()
281 tos = rs->rs_tos; in rds_ioctl()
284 return -EFAULT; in rds_ioctl()
287 return -ENOIOCTLCMD; in rds_ioctl()
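The handler above moves a single TOS byte through the ioctl argument and refuses to change it once a TOS is set or a connection exists. A hedged userspace sketch, assuming the SIOCRDSSETTOS/SIOCRDSGETTOS macros from <linux/rds.h> (the case labels themselves are not among the matched lines):

#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <linux/rds.h>

/* Set/get the per-socket RDS TOS; the argument is a pointer to one byte. */
static int rds_set_tos(int fd, unsigned char tos)
{
	return ioctl(fd, SIOCRDSSETTOS, &tos);
}

static int rds_get_tos(int fd, unsigned char *tos)
{
	return ioctl(fd, SIOCRDSGETTOS, tos);
}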
293 static int rds_cancel_sent_to(struct rds_sock *rs, sockptr_t optval, int len) in rds_cancel_sent_to() argument
300 if (ipv6_addr_any(&rs->rs_bound_addr)) { in rds_cancel_sent_to()
301 ret = -ENOTCONN; /* XXX not a great errno */ in rds_cancel_sent_to()
306 ret = -EINVAL; in rds_cancel_sent_to()
312 ret = -EFAULT; in rds_cancel_sent_to()
320 ret = -EFAULT; in rds_cancel_sent_to()
325 rds_send_drop_to(rs, &sin6); in rds_cancel_sent_to()
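rds_cancel_sent_to() is reached through setsockopt() on SOL_RDS with the destination address as the option value. A hedged userspace sketch, assuming the RDS_CANCEL_SENT_TO and SOL_RDS names from <linux/rds.h>:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/rds.h>

/* Drop sends still queued to one destination; the socket must already
 * be bound, otherwise the kernel returns -ENOTCONN as seen above. */
static int rds_cancel_sent(int fd, const char *daddr, unsigned short port)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	inet_pton(AF_INET, daddr, &sin.sin_addr);

	return setsockopt(fd, SOL_RDS, RDS_CANCEL_SENT_TO, &sin, sizeof(sin));
}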
336 return -EINVAL; in rds_set_bool_option()
338 return -EFAULT; in rds_set_bool_option()
343 static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen) in rds_cong_monitor() argument
347 ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen); in rds_cong_monitor()
349 if (rs->rs_cong_monitor) { in rds_cong_monitor()
350 rds_cong_add_socket(rs); in rds_cong_monitor()
352 rds_cong_remove_socket(rs); in rds_cong_monitor()
353 rs->rs_cong_mask = 0; in rds_cong_monitor()
354 rs->rs_cong_notify = 0; in rds_cong_monitor()
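The congestion monitor is a boolean option; turning it off also clears the pending congestion state, as the two assignments above show. A hedged userspace sketch, assuming RDS_CONG_MONITOR from <linux/rds.h>:

#include <sys/socket.h>
#include <linux/rds.h>

/* Enable (1) or disable (0) congestion-map update notifications. */
static int rds_set_cong_monitor(int fd, int on)
{
	return setsockopt(fd, SOL_RDS, RDS_CONG_MONITOR, &on, sizeof(on));
}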
360 static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen) in rds_set_transport() argument
364 if (rs->rs_transport) in rds_set_transport()
365 return -EOPNOTSUPP; /* previously attached to transport */ in rds_set_transport()
368 return -EINVAL; in rds_set_transport()
371 return -EFAULT; in rds_set_transport()
374 return -EINVAL; in rds_set_transport()
376 rs->rs_transport = rds_trans_get(t_type); in rds_set_transport()
378 return rs->rs_transport ? 0 : -ENOPROTOOPT; in rds_set_transport()
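A transport can be pinned exactly once; a second attempt fails with -EOPNOTSUPP as above. A hedged userspace sketch, assuming SO_RDS_TRANSPORT and the RDS_TRANS_* values from <linux/rds.h> (the case label is not among the matched lines):

#include <sys/socket.h>
#include <linux/rds.h>

/* Pin the socket to the TCP transport; typically done right after
 * socket creation, before binding attaches a transport implicitly. */
static int rds_use_tcp_transport(int fd)
{
	int t_type = RDS_TRANS_TCP;

	return setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, &t_type, sizeof(t_type));
}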
387 return -EFAULT; in rds_enable_recvtstamp()
390 return -EFAULT; in rds_enable_recvtstamp()
405 static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval, in rds_recv_track_latency() argument
412 return -EFAULT; in rds_recv_track_latency()
415 return -EFAULT; in rds_recv_track_latency()
418 return -EFAULT; in rds_recv_track_latency()
420 rs->rs_rx_traces = trace.rx_traces; in rds_recv_track_latency()
421 for (i = 0; i < rs->rs_rx_traces; i++) { in rds_recv_track_latency()
423 rs->rs_rx_traces = 0; in rds_recv_track_latency()
424 return -EFAULT; in rds_recv_track_latency()
426 rs->rs_rx_trace[i] = trace.rx_trace_pos[i]; in rds_recv_track_latency()
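The rx_traces/rx_trace_pos fields copied above come straight from the option value. A hedged userspace sketch, assuming SO_RDS_MSG_RXPATH_LATENCY, struct rds_rx_trace_so and the RDS_MSG_RX_* trace points from <linux/rds.h>:

#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Ask for one receive-path latency trace point per message; the traced
 * values are assumed to come back as RDS_CMSG_RXPATH_LATENCY control
 * messages on recvmsg(). */
static int rds_enable_rx_latency(int fd)
{
	struct rds_rx_trace_so trace;

	memset(&trace, 0, sizeof(trace));
	trace.rx_traces = 1;
	trace.rx_trace_pos[0] = RDS_MSG_RX_DGRAM_DELIVERED;

	return setsockopt(fd, SOL_RDS, SO_RDS_MSG_RXPATH_LATENCY,
			  &trace, sizeof(trace));
}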
435 struct rds_sock *rs = rds_sk_to_rs(sock->sk); in rds_setsockopt() local
439 ret = -ENOPROTOOPT; in rds_setsockopt()
445 ret = rds_cancel_sent_to(rs, optval, optlen); in rds_setsockopt()
448 ret = rds_get_mr(rs, optval, optlen); in rds_setsockopt()
451 ret = rds_get_mr_for_dest(rs, optval, optlen); in rds_setsockopt()
454 ret = rds_free_mr(rs, optval, optlen); in rds_setsockopt()
457 ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen); in rds_setsockopt()
460 ret = rds_cong_monitor(rs, optval, optlen); in rds_setsockopt()
463 lock_sock(sock->sk); in rds_setsockopt()
464 ret = rds_set_transport(rs, optval, optlen); in rds_setsockopt()
465 release_sock(sock->sk); in rds_setsockopt()
469 lock_sock(sock->sk); in rds_setsockopt()
470 ret = rds_enable_recvtstamp(sock->sk, optval, optlen, optname); in rds_setsockopt()
471 release_sock(sock->sk); in rds_setsockopt()
474 ret = rds_recv_track_latency(rs, optval, optlen); in rds_setsockopt()
477 ret = -ENOPROTOOPT; in rds_setsockopt()
486 struct rds_sock *rs = rds_sk_to_rs(sock->sk); in rds_getsockopt() local
487 int ret = -ENOPROTOOPT, len; in rds_getsockopt()
494 ret = -EFAULT; in rds_getsockopt()
506 ret = -EINVAL; in rds_getsockopt()
508 if (put_user(rs->rs_recverr, (int __user *) optval) || in rds_getsockopt()
510 ret = -EFAULT; in rds_getsockopt()
516 ret = -EINVAL; in rds_getsockopt()
519 trans = (rs->rs_transport ? rs->rs_transport->t_type : in rds_getsockopt()
523 ret = -EFAULT; in rds_getsockopt()
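Reading the transport back mirrors the branch above (the truncated else-arm is assumed to be RDS_TRANS_NONE). A hedged userspace sketch:

#include <sys/socket.h>
#include <linux/rds.h>

/* Query which transport, if any, the socket is attached to. */
static int rds_get_transport(int fd, int *t_type)
{
	socklen_t len = sizeof(*t_type);

	return getsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, t_type, &len);
}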
539 struct sock *sk = sock->sk; in rds_connect()
541 struct rds_sock *rs = rds_sk_to_rs(sk); in rds_connect() local
545 return -EINVAL; in rds_connect()
549 switch (uaddr->sa_family) { in rds_connect()
553 ret = -EINVAL; in rds_connect()
556 if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) { in rds_connect()
557 ret = -EDESTADDRREQ; in rds_connect()
560 if (ipv4_is_multicast(sin->sin_addr.s_addr) || in rds_connect()
561 sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) { in rds_connect()
562 ret = -EINVAL; in rds_connect()
565 ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &rs->rs_conn_addr); in rds_connect()
566 rs->rs_conn_port = sin->sin_port; in rds_connect()
576 ret = -EINVAL; in rds_connect()
579 addr_type = ipv6_addr_type(&sin6->sin6_addr); in rds_connect()
584 ret = -EPROTOTYPE; in rds_connect()
591 addr4 = sin6->sin6_addr.s6_addr32[3]; in rds_connect()
595 ret = -EPROTOTYPE; in rds_connect()
604 if (sin6->sin6_scope_id == 0 || in rds_connect()
605 (!ipv6_addr_any(&rs->rs_bound_addr) && in rds_connect()
606 rs->rs_bound_scope_id && in rds_connect()
607 sin6->sin6_scope_id != rs->rs_bound_scope_id)) { in rds_connect()
608 ret = -EINVAL; in rds_connect()
615 rs->rs_bound_scope_id = sin6->sin6_scope_id; in rds_connect()
617 rs->rs_conn_addr = sin6->sin6_addr; in rds_connect()
618 rs->rs_conn_port = sin6->sin6_port; in rds_connect()
624 ret = -EAFNOSUPPORT; in rds_connect()
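Putting the create/bind/connect pieces together from the caller's side; a hedged userspace sketch (laddr, raddr and port are placeholders, and AF_RDS gets a fallback define in case the libc headers lack it; 21 is the kernel's value, stated as an assumption):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef AF_RDS
#define AF_RDS 21
#endif

/* Create an RDS socket, bind a local IPv4 address and record a default
 * peer with connect(). */
static int rds_client_socket(const char *laddr, const char *raddr,
			     unsigned short port)
{
	struct sockaddr_in sin;
	int fd;

	fd = socket(AF_RDS, SOCK_SEQPACKET, 0);	/* type/protocol enforced by rds_create() */
	if (fd < 0)
		return -1;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	inet_pton(AF_INET, laddr, &sin.sin_addr);
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		goto err;

	/* INADDR_ANY, multicast and broadcast peers are rejected by the
	 * rds_connect() checks above. */
	inet_pton(AF_INET, raddr, &sin.sin_addr);
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		goto err;

	return fd;
err:
	close(fd);
	return -1;
}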
660 struct rds_sock *rs = rds_sk_to_rs(sk); in rds_sock_destruct() local
662 WARN_ON((&rs->rs_item != rs->rs_item.next || in rds_sock_destruct()
663 &rs->rs_item != rs->rs_item.prev)); in rds_sock_destruct()
668 struct rds_sock *rs; in __rds_create() local
671 sock->ops = &rds_proto_ops; in __rds_create()
672 sk->sk_protocol = protocol; in __rds_create()
673 sk->sk_destruct = rds_sock_destruct; in __rds_create()
675 rs = rds_sk_to_rs(sk); in __rds_create()
676 spin_lock_init(&rs->rs_lock); in __rds_create()
677 rwlock_init(&rs->rs_recv_lock); in __rds_create()
678 INIT_LIST_HEAD(&rs->rs_send_queue); in __rds_create()
679 INIT_LIST_HEAD(&rs->rs_recv_queue); in __rds_create()
680 INIT_LIST_HEAD(&rs->rs_notify_queue); in __rds_create()
681 INIT_LIST_HEAD(&rs->rs_cong_list); in __rds_create()
682 rds_message_zcopy_queue_init(&rs->rs_zcookie_queue); in __rds_create()
683 spin_lock_init(&rs->rs_rdma_lock); in __rds_create()
684 rs->rs_rdma_keys = RB_ROOT; in __rds_create()
685 rs->rs_rx_traces = 0; in __rds_create()
686 rs->rs_tos = 0; in __rds_create()
687 rs->rs_conn = NULL; in __rds_create()
690 list_add_tail(&rs->rs_item, &rds_sock_list); in __rds_create()
702 if (sock->type != SOCK_SEQPACKET || protocol) in rds_create()
703 return -ESOCKTNOSUPPORT; in rds_create()
707 return -ENOMEM; in rds_create()
712 void rds_sock_addref(struct rds_sock *rs) in rds_sock_addref() argument
714 sock_hold(rds_rs_to_sk(rs)); in rds_sock_addref()
717 void rds_sock_put(struct rds_sock *rs) in rds_sock_put() argument
719 sock_put(rds_rs_to_sk(rs)); in rds_sock_put()
732 struct rds_sock *rs; in rds_sock_inc_info() local
740 list_for_each_entry(rs, &rds_sock_list, rs_item) { in rds_sock_inc_info()
742 if (!ipv6_addr_v4mapped(&rs->rs_bound_addr)) in rds_sock_inc_info()
745 read_lock(&rs->rs_recv_lock); in rds_sock_inc_info()
748 list_for_each_entry(inc, &rs->rs_recv_queue, i_item) { in rds_sock_inc_info()
752 inc->i_saddr.s6_addr32[3], in rds_sock_inc_info()
753 rs->rs_bound_addr_v4, in rds_sock_inc_info()
757 read_unlock(&rs->rs_recv_lock); in rds_sock_inc_info()
762 lens->nr = total; in rds_sock_inc_info()
763 lens->each = sizeof(struct rds_info_message); in rds_sock_inc_info()
773 struct rds_sock *rs; in rds6_sock_inc_info() local
779 list_for_each_entry(rs, &rds_sock_list, rs_item) { in rds6_sock_inc_info()
780 read_lock(&rs->rs_recv_lock); in rds6_sock_inc_info()
782 list_for_each_entry(inc, &rs->rs_recv_queue, i_item) { in rds6_sock_inc_info()
785 rds6_inc_info_copy(inc, iter, &inc->i_saddr, in rds6_sock_inc_info()
786 &rs->rs_bound_addr, 1); in rds6_sock_inc_info()
789 read_unlock(&rs->rs_recv_lock); in rds6_sock_inc_info()
794 lens->nr = total; in rds6_sock_inc_info()
795 lens->each = sizeof(struct rds6_info_message); in rds6_sock_inc_info()
805 struct rds_sock *rs; in rds_sock_info() local
816 list_for_each_entry(rs, &rds_sock_list, rs_item) { in rds_sock_info()
818 if (!ipv6_addr_v4mapped(&rs->rs_bound_addr)) in rds_sock_info()
820 sinfo.sndbuf = rds_sk_sndbuf(rs); in rds_sock_info()
821 sinfo.rcvbuf = rds_sk_rcvbuf(rs); in rds_sock_info()
822 sinfo.bound_addr = rs->rs_bound_addr_v4; in rds_sock_info()
823 sinfo.connected_addr = rs->rs_conn_addr_v4; in rds_sock_info()
824 sinfo.bound_port = rs->rs_bound_port; in rds_sock_info()
825 sinfo.connected_port = rs->rs_conn_port; in rds_sock_info()
826 sinfo.inum = sock_i_ino(rds_rs_to_sk(rs)); in rds_sock_info()
833 lens->nr = cnt; in rds_sock_info()
834 lens->each = sizeof(struct rds_info_socket); in rds_sock_info()
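The per-socket table filled in above is read back through the RDS info interface, i.e. a getsockopt() on SOL_RDS. A heavily hedged sketch, assuming the RDS_INFO_SOCKETS option name from <linux/rds.h> and the grow-on-ENOSPC retry convention used by the rds-info tool:

#include <errno.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Fetch the rds_info_socket table; on ENOSPC the kernel is assumed to
 * report the required buffer length back through optlen. */
static struct rds_info_socket *rds_dump_sockets(int fd, unsigned int *count)
{
	socklen_t len = 0;
	void *buf = NULL, *nbuf;

	for (;;) {
		if (getsockopt(fd, SOL_RDS, RDS_INFO_SOCKETS, buf, &len) >= 0)
			break;
		if (errno != ENOSPC) {
			free(buf);
			return NULL;
		}
		nbuf = realloc(buf, len);
		if (!nbuf) {
			free(buf);
			return NULL;
		}
		buf = nbuf;
	}

	*count = len / sizeof(struct rds_info_socket);
	return buf;
}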
845 struct rds_sock *rs; in rds6_sock_info() local
854 list_for_each_entry(rs, &rds_sock_list, rs_item) { in rds6_sock_info()
855 sinfo6.sndbuf = rds_sk_sndbuf(rs); in rds6_sock_info()
856 sinfo6.rcvbuf = rds_sk_rcvbuf(rs); in rds6_sock_info()
857 sinfo6.bound_addr = rs->rs_bound_addr; in rds6_sock_info()
858 sinfo6.connected_addr = rs->rs_conn_addr; in rds6_sock_info()
859 sinfo6.bound_port = rs->rs_bound_port; in rds6_sock_info()
860 sinfo6.connected_port = rs->rs_conn_port; in rds6_sock_info()
861 sinfo6.inum = sock_i_ino(rds_rs_to_sk(rs)); in rds6_sock_info()
867 lens->nr = rds_sock_count; in rds6_sock_info()
868 lens->each = sizeof(struct rds6_info_socket); in rds6_sock_info()
957 MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");