Lines matching "packet-oriented" (full-text search of net/tipc/socket.c)

4  * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * Copyright (c) 2020-2021, Red Hat Inc
75 * struct tipc_sock - TIPC socket structure
76 * @sk: socket - interacts with 'port' and with user via the socket API
77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
102 * @pkt_cnt: TIPC socket packet count
169 return msg_prevnode(&tsk->phdr);
174 return msg_destnode(&tsk->phdr);
179 return msg_destport(&tsk->phdr);
184 return msg_src_droppable(&tsk->phdr) != 0;
189 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
194 return msg_dest_droppable(&tsk->phdr) != 0;
199 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
204 return msg_importance(&tsk->phdr);
215 return -EINVAL;
216 msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
222 return tsk->snt_unacked > tsk->snd_win;
240 * - If block-based flow control is not supported by the peer we
245 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
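
The congestion test and capability check above pair with per-message block accounting. A minimal sketch of how the two accounting modes likely fit together, reconstructed around these fragments (the helper names and FLOWCTL_BLK_SZ follow this file's conventions and are assumptions here, not part of the matched lines):

	/* Advertised window, in flow-control blocks, for a buffer size */
	static u16 tsk_adv_blocks(int len)
	{
		return len / FLOWCTL_BLK_SZ / 4;
	}

	/* Cost of one message in advertised blocks; peers without
	 * TIPC_BLOCK_FLOWCTL account whole messages, so every message
	 * costs one unit regardless of its length.
	 */
	static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
	{
		if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
			return (msglen / FLOWCTL_BLK_SZ) + 1;
		return 1;
	}
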
250 /* tsk_set_nagle - enable/disable the Nagle property by manipulating maxnagle
254 struct sock *sk = &tsk->sk;
256 tsk->maxnagle = 0;
257 if (sk->sk_type != SOCK_STREAM)
259 if (tsk->nodelay)
261 if (!(tsk->peer_caps & TIPC_NAGLE))
264 if (tsk->max_pkt == MAX_MSG_SIZE)
265 tsk->maxnagle = 1500;
267 tsk->maxnagle = tsk->max_pkt;
271 * tsk_advance_rx_queue - discard first buffer in socket receive queue
279 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
300 * tsk_rej_rx_queue - reject all buffers in socket receive queue
310 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
316 return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
319 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
326 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
329 /* tsk_peer_msg - verify if message was sent by connected port's peer
336 struct sock *sk = &tsk->sk;
362 /* tipc_set_sk_state - set the sk_state of the socket
371 int oldsk_state = sk->sk_state;
372 int res = -EINVAL;
396 sk->sk_state = state;
403 struct sock *sk = sock->sk;
405 int typ = sock->type;
410 if (sk->sk_state == TIPC_DISCONNECTING)
411 return -EPIPE;
413 return -ENOTCONN;
416 return -EAGAIN;
432 sk_ = (sock_)->sk; \
447 * tipc_sk_create - create a TIPC socket
449 * @sock: pre-allocated socket structure
468 return -EPROTONOSUPPORT;
470 switch (sock->type) {
482 return -EPROTOTYPE;
488 return -ENOMEM;
491 tsk->max_pkt = MAX_PKT_DEFAULT;
492 tsk->maxnagle = 0;
493 tsk->nagle_start = NAGLE_START_INIT;
494 INIT_LIST_HEAD(&tsk->publications);
495 INIT_LIST_HEAD(&tsk->cong_links);
496 msg = &tsk->phdr;
499 sock->ops = ops;
505 return -EINVAL;
514 msg_set_origport(msg, tsk->portid);
515 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
516 sk->sk_shutdown = 0;
517 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
518 sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
519 sk->sk_data_ready = tipc_data_ready;
520 sk->sk_write_space = tipc_write_space;
521 sk->sk_destruct = tipc_sock_destruct;
522 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
523 tsk->group_is_open = true;
524 atomic_set(&tsk->dupl_rcvcnt, 0);
527 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
528 tsk->rcv_win = tsk->snd_win;
532 if (sock->type == SOCK_DGRAM)
535 __skb_queue_head_init(&tsk->mc_method.deferredq);
544 sock_put(&tsk->sk);
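
A minimal userspace counterpart of this create path (a sketch, assuming only standard headers and <linux/tipc.h>):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/tipc.h>

	int main(void)
	{
		/* SOCK_RDM: reliable datagrams; the type switch above also
		 * accepts SOCK_DGRAM, SOCK_SEQPACKET and SOCK_STREAM.
		 */
		int sd = socket(AF_TIPC, SOCK_RDM, 0);

		if (sd < 0)
			perror("socket(AF_TIPC)");
		return 0;
	}
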
550 struct sock *sk = sock->sk;
557 /* Keep hi-prio shutdown msgs from bypassing msgs in the link wakeup queue */
558 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
564 __skb_queue_purge(&sk->sk_write_queue);
567 skb = skb_peek(&sk->sk_receive_queue);
568 if (skb && TIPC_SKB_CB(skb)->bytes_read) {
569 __skb_unlink(skb, &sk->sk_receive_queue);
579 switch (sk->sk_state) {
583 tipc_node_remove_conn(net, dnode, tsk->portid);
584 /* Send a FIN+/- to its peer */
585 skb = __skb_dequeue(&sk->sk_receive_queue);
587 __skb_queue_purge(&sk->sk_receive_queue);
594 tsk->portid, error);
596 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
603 __skb_queue_purge(&sk->sk_receive_queue);
609 * tipc_release - destroy a TIPC socket
616 * is partially read, it is discarded and the next one is rejected instead.)
626 struct sock *sk = sock->sk;
631 * releases a pre-allocated child socket that was never used)
641 sk->sk_shutdown = SHUTDOWN_MASK;
644 __skb_queue_purge(&tsk->mc_method.deferredq);
645 sk_stop_timer(sk, &sk->sk_timer);
651 tipc_dest_list_purge(&tsk->cong_links);
652 tsk->cong_link_cnt = 0;
653 call_rcu(&tsk->rcu, tipc_sk_callback);
654 sock->sk = NULL;
660 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
672 * access any non-constant socket information.
677 struct tipc_sock *tsk = tipc_sk(sock->sk);
683 if (ua->addrtype == TIPC_SERVICE_ADDR) {
684 ua->addrtype = TIPC_SERVICE_RANGE;
685 ua->sr.upper = ua->sr.lower;
687 if (ua->scope < 0) {
689 ua->scope = -ua->scope;
692 if (ua->scope != TIPC_NODE_SCOPE)
693 ua->scope = TIPC_CLUSTER_SCOPE;
695 if (tsk->group)
696 return -EACCES;
707 lock_sock(sock->sk);
709 release_sock(sock->sk);
716 u32 atype = ua->addrtype;
720 return -EINVAL;
722 return -EAFNOSUPPORT;
723 if (ua->sr.type < TIPC_RESERVED_TYPES) {
725 ua->sr.type);
726 return -EACCES;
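
A hedged userspace sketch of this bind path, reusing the socket descriptor sd from the earlier example (type/instance values are arbitrary; types below TIPC_RESERVED_TYPES are refused with -EACCES, and a negative scope withdraws the binding, per the sign flip above):

	struct sockaddr_tipc addr = {
		.family = AF_TIPC,
		.addrtype = TIPC_SERVICE_RANGE,	/* a plain service address is
						 * widened to a one-instance range */
		.scope = TIPC_CLUSTER_SCOPE,
		.addr.nameseq = { .type = 18888, .lower = 10, .upper = 20 },
	};

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");
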
733 * tipc_getname - get port ID of socket or peer socket
748 struct sock *sk = sock->sk;
754 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
755 return -ENOTCONN;
756 addr->addr.id.ref = tsk_peer_port(tsk);
757 addr->addr.id.node = tsk_peer_node(tsk);
759 addr->addr.id.ref = tsk->portid;
760 addr->addr.id.node = tipc_own_addr(sock_net(sk));
763 addr->addrtype = TIPC_SOCKET_ADDR;
764 addr->family = AF_TIPC;
765 addr->scope = 0;
766 addr->addr.name.domain = 0;
772 * tipc_poll - read and possibly block on pollmask
781 * since the pollmask info is potentially out of date the moment this routine
792 struct sock *sk = sock->sk;
799 if (sk->sk_shutdown & RCV_SHUTDOWN)
801 if (sk->sk_shutdown == SHUTDOWN_MASK)
804 switch (sk->sk_state) {
806 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
811 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
815 if (tsk->group_is_open && !tsk->cong_link_cnt)
819 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
831 * tipc_sendmcast - send multicast message
844 struct sock *sk = sock->sk;
846 struct tipc_msg *hdr = &tsk->phdr;
853 if (tsk->group)
854 return -EACCES;
857 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
865 return -EHOSTUNREACH;
873 msg_set_nametype(hdr, ua->sr.type);
874 msg_set_namelower(hdr, ua->sr.lower);
875 msg_set_nameupper(hdr, ua->sr.upper);
885 rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
886 &tsk->cong_link_cnt);
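
From userspace this multicast path is reached by addressing a datagram to a service range; every socket bound inside the range receives a copy. A sketch under the same assumptions as above:

	struct sockaddr_tipc dst = {
		.family = AF_TIPC,
		.addrtype = TIPC_SERVICE_RANGE,
		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
	};

	if (sendto(sd, "hello", 5, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto(multicast)");
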
895 * tipc_send_group_msg - send a message to a member in the group
908 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
909 struct tipc_mc_method *method = &tsk->mc_method;
911 struct tipc_msg *hdr = &tsk->phdr;
924 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
930 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
931 if (unlikely(rc == -ELINKCONG)) {
932 tipc_dest_push(&tsk->cong_links, dnode, 0);
933 tsk->cong_link_cnt++;
940 method->rcast = true;
941 method->mandatory = true;
946 * tipc_send_group_unicast - send message to a member in the group
958 struct sock *sk = sock->sk;
959 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
967 node = ua->sk.node;
968 port = ua->sk.ref;
970 return -EHOSTUNREACH;
974 !tipc_dest_find(&tsk->cong_links, node, 0) &&
975 tsk->group &&
976 !tipc_group_cong(tsk->group, node, port, blks,
982 return -EHOSTUNREACH;
990 * tipc_send_group_anycast - send message to any member with given identity
1002 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1003 struct sock *sk = sock->sk;
1005 struct list_head *cong_links = &tsk->cong_links;
1007 struct tipc_msg *hdr = &tsk->phdr;
1017 ua->sa.type = msg_nametype(hdr);
1018 ua->scope = msg_lookup_scope(hdr);
1021 exclude = tipc_group_exclude(tsk->group);
1025 /* Look for a non-congested destination member, if any */
1029 return -EHOSTUNREACH;
1031 cong = tipc_group_cong(tsk->group, node, port, blks,
1051 tsk->group &&
1052 !tipc_group_cong(tsk->group, node, port,
1063 return -EHOSTUNREACH;
1071 * tipc_send_group_bcast - send message to all members in communication group
1083 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1084 struct sock *sk = sock->sk;
1088 struct tipc_mc_method *method = &tsk->mc_method;
1089 bool ack = method->mandatory && method->rcast;
1091 struct tipc_msg *hdr = &tsk->phdr;
1094 int rc = -EHOSTUNREACH;
1098 !tsk->cong_link_cnt && tsk->group &&
1099 !tipc_group_bc_cong(tsk->group, blks));
1103 dsts = tipc_group_dests(tsk->group);
1104 if (!dsts->local && !dsts->remote)
1105 return -EHOSTUNREACH;
1110 msg_set_nameinst(hdr, ua->sa.instance);
1118 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1130 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1135 tipc_group_update_bc_members(tsk->group, blks, ack);
1138 method->mandatory = false;
1139 method->expires = jiffies;
1145 * tipc_send_group_mcast - send message to all members with given identity
1157 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1158 struct sock *sk = sock->sk;
1160 struct tipc_group *grp = tsk->group;
1161 struct tipc_msg *hdr = &tsk->phdr;
1166 ua->sa.type = msg_nametype(hdr);
1167 ua->scope = msg_lookup_scope(hdr);
1171 return -EHOSTUNREACH;
1174 tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
1183 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1188 * Multi-threaded: parallel calls with reference to same queues may occur
1207 skb = tipc_skb_peek(arrvq, &inputq->lock);
1208 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1223 spin_lock_bh(&inputq->lock);
1229 spin_unlock_bh(&inputq->lock);
1254 spin_lock_bh(&inputq->lock);
1260 spin_unlock_bh(&inputq->lock);
1272 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1274 struct net *net = sock_net(&tsk->sk);
1279 tsk->pkt_cnt += skb_queue_len(txq);
1280 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1281 tsk->oneway = 0;
1282 if (tsk->nagle_start < NAGLE_START_MAX)
1283 tsk->nagle_start *= 2;
1284 tsk->expect_ack = false;
1285 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1286 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1287 tsk->nagle_start);
1289 tsk->nagle_start = NAGLE_START_INIT;
1292 tsk->expect_ack = true;
1294 tsk->expect_ack = false;
1297 tsk->msg_acc = 0;
1298 tsk->pkt_cnt = 0;
1301 if (!skb || tsk->cong_link_cnt)
1308 if (tsk->msg_acc)
1309 tsk->pkt_cnt += skb_queue_len(txq);
1310 tsk->snt_unacked += tsk->snd_backlog;
1311 tsk->snd_backlog = 0;
1312 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1313 if (rc == -ELINKCONG)
1314 tsk->cong_link_cnt = 1;
1318 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
1330 struct sock *sk = &tsk->sk;
1344 sk->sk_state_change(sk);
1347 * - convert msg to abort msg and add to inqueue
1357 tsk->probe_unacked = false;
1367 tsk->snt_unacked -= msg_conn_ack(hdr);
1368 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1369 tsk->snd_win = msg_adv_win(hdr);
1371 sk->sk_write_space(sk);
1380 * tipc_sendmsg - send message in connectionless manner
1395 struct sock *sk = sock->sk;
1407 struct sock *sk = sock->sk;
1410 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1411 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1412 struct list_head *clinks = &tsk->cong_links;
1414 struct tipc_group *grp = tsk->group;
1415 struct tipc_msg *hdr = &tsk->phdr;
1421 return -EMSGSIZE;
1424 if (!tipc_uaddr_valid(ua, m->msg_namelen))
1425 return -EINVAL;
1426 atype = ua->addrtype;
1439 return -EINVAL;
1443 ua = (struct tipc_uaddr *)&tsk->peer;
1444 if (!syn && ua->family != AF_TIPC)
1445 return -EDESTADDRREQ;
1446 atype = ua->addrtype;
1450 if (sk->sk_state == TIPC_LISTEN)
1451 return -EPIPE;
1452 if (sk->sk_state != TIPC_OPEN)
1453 return -EISCONN;
1454 if (tsk->published)
1455 return -EOPNOTSUPP;
1457 tsk->conn_addrtype = atype;
1467 skaddr.node = ua->lookup_node;
1468 ua->scope = tipc_node2scope(skaddr.node);
1470 return -EHOSTUNREACH;
1472 skaddr = ua->sk;
1474 return -EINVAL;
1489 msg_set_nametype(hdr, ua->sa.type);
1490 msg_set_nameinst(hdr, ua->sa.instance);
1491 msg_set_lookup_scope(hdr, ua->scope);
1500 mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
1504 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1506 return -ENOMEM;
1511 rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
1512 if (unlikely(rc == -ELINKCONG)) {
1514 tsk->cong_link_cnt++;
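
The userspace form of this connectionless send is a sendto() against a service address, which the name table resolves according to the lookup scope set above. A sketch (arbitrary type/instance):

	struct sockaddr_tipc dst = {
		.family = AF_TIPC,
		.addrtype = TIPC_SERVICE_ADDR,
		.addr.name.name = { .type = 18888, .instance = 17 },
		/* .addr.name.domain left 0: cluster-wide lookup */
	};

	if (sendto(sd, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto(service address)");
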
1530 * tipc_sendstream - send stream-oriented data
1542 struct sock *sk = sock->sk;
1554 struct sock *sk = sock->sk;
1555 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1556 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1557 struct sk_buff_head *txq = &sk->sk_write_queue;
1559 struct tipc_msg *hdr = &tsk->phdr;
1563 int maxnagle = tsk->maxnagle;
1564 int maxpkt = tsk->max_pkt;
1569 return -EMSGSIZE;
1572 if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
1575 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1576 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1583 (!tsk->cong_link_cnt &&
1588 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1589 blocks = tsk->snd_backlog;
1590 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1596 tsk->msg_acc++;
1597 if (blocks <= 64 && tsk->expect_ack) {
1598 tsk->snd_backlog = blocks;
1602 tsk->pkt_cnt += skb_queue_len(txq);
1607 tsk->expect_ack = true;
1609 tsk->expect_ack = false;
1611 tsk->msg_acc = 0;
1612 tsk->pkt_cnt = 0;
1622 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1623 if (unlikely(rc == -ELINKCONG)) {
1624 tsk->cong_link_cnt = 1;
1628 tsk->snt_unacked += blocks;
1629 tsk->snd_backlog = 0;
1638 * tipc_send_packet - send a connection-oriented message
1650 return -EMSGSIZE;
1655 /* tipc_sk_finish_conn - complete the setup of a connection
1660 struct sock *sk = &tsk->sk;
1662 struct tipc_msg *msg = &tsk->phdr;
1671 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1673 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1674 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1675 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1677 __skb_queue_purge(&sk->sk_write_queue);
1678 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1682 tsk->rcv_win = FLOWCTL_MSG_WIN;
1683 tsk->snd_win = FLOWCTL_MSG_WIN;
1687 * tipc_sk_set_orig_addr - capture sender's address for received message
1695 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1701 srcaddr->sock.family = AF_TIPC;
1702 srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
1703 srcaddr->sock.scope = 0;
1704 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1705 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1706 srcaddr->sock.addr.name.domain = 0;
1707 m->msg_namelen = sizeof(struct sockaddr_tipc);
1713 srcaddr->member.family = AF_TIPC;
1714 srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
1715 srcaddr->member.scope = 0;
1716 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1717 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1718 srcaddr->member.addr.name.domain = 0;
1719 m->msg_namelen = sizeof(*srcaddr);
1723 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1740 if (likely(m->msg_controllen == 0))
1749 return -ENOMEM;
1776 has_addr = !!tsk->conn_addrtype;
1777 data[0] = msg_nametype(&tsk->phdr);
1778 data[1] = msg_nameinst(&tsk->phdr);
1791 struct sock *sk = &tsk->sk;
1801 tsk->portid, TIPC_OK);
1805 msg_set_conn_ack(msg, tsk->rcv_unacked);
1806 tsk->rcv_unacked = 0;
1809 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1810 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1811 msg_set_adv_win(msg, tsk->rcv_win);
1824 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1830 struct sock *sk = sock->sk;
1839 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1840 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1841 err = -ENOTCONN;
1852 if (!skb_queue_empty(&sk->sk_receive_queue))
1854 err = -EAGAIN;
1870 * tipc_recvmsg - receive packet-oriented message
1884 struct sock *sk = sock->sk;
1897 return -EINVAL;
1900 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1901 rc = -ENOTCONN;
1911 skb = skb_peek(&sk->sk_receive_queue);
1930 /* Capture data if non-error msg, otherwise just set return value */
1932 int offset = skb_cb->bytes_read;
1934 copy = min_t(int, dlen - offset, buflen);
1941 skb_cb->bytes_read = offset + copy;
1943 m->msg_flags |= MSG_TRUNC;
1944 skb_cb->bytes_read = 0;
1948 m->msg_flags |= MSG_EOR;
1949 skb_cb->bytes_read = 0;
1954 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1955 rc = -ECONNRESET;
1963 m->msg_flags |= MSG_EOR;
1964 m->msg_flags |= MSG_OOB;
1973 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1975 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1981 if (skb_cb->bytes_read)
1990 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1991 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
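
The matching userspace read; the sender's socket address that tipc_sk_set_orig_addr() captured further up comes back through recvfrom()'s address argument. A sketch:

	struct sockaddr_tipc src;
	socklen_t alen = sizeof(src);
	char rbuf[4096];
	ssize_t n = recvfrom(sd, rbuf, sizeof(rbuf), 0,
			     (struct sockaddr *)&src, &alen);

	if (n >= 0)
		printf("%zd bytes from port %u on node %x\n",
		       n, src.addr.id.ref, src.addr.id.node);
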
1999 * tipc_recvstream - receive stream-oriented data
2013 struct sock *sk = sock->sk;
2025 return -EINVAL;
2029 if (unlikely(sk->sk_state == TIPC_OPEN)) {
2030 rc = -ENOTCONN;
2041 skb = skb_peek(&sk->sk_receive_queue);
2048 /* Discard any empty non-errored (SYN-) message */
2065 offset = skb_cb->bytes_read;
2066 copy = min_t(int, dlen - offset, buflen - copied);
2074 skb_cb->bytes_read = offset;
2079 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2080 rc = -ECONNRESET;
2091 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2092 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2099 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
2106 * tipc_write_space - wake up thread if port congestion is released
2114 wq = rcu_dereference(sk->sk_wq);
2116 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2122 * tipc_data_ready - wake up threads to indicate messages have been received
2132 wq = rcu_dereference(sk->sk_wq);
2134 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2141 __skb_queue_purge(&sk->sk_receive_queue);
2151 struct tipc_group *grp = tsk->group;
2159 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2162 tsk->cong_link_cnt--;
2170 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2178 sk->sk_write_space(sk);
2184 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2193 struct sock *sk = &tsk->sk;
2206 tsk->oneway = 0;
2208 switch (sk->sk_state) {
2215 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2219 /* Empty ACK- received - wake up sleeping connect() and drop */
2220 sk->sk_state_change(sk);
2233 if (skb_queue_empty(&sk->sk_write_queue))
2236 delay %= (tsk->conn_timeout / 4);
2238 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2252 if (!skb_queue_empty(&sk->sk_write_queue))
2254 /* Accept only connection-based messages sent by peer */
2273 tipc_node_remove_conn(net, pnode, tsk->portid);
2274 sk->sk_state_change(sk);
2277 pr_err("Unknown sk_state %u\n", sk->sk_state);
2281 sk->sk_err = ECONNREFUSED;
2282 sk->sk_state_change(sk);
2287 * rcvbuf_limit - get proper overload limit of socket receive queue
2291 * For connection-oriented messages, irrespective of importance,
2310 return READ_ONCE(sk->sk_rcvbuf);
2313 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2315 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2316 return READ_ONCE(sk->sk_rcvbuf);
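
Read together, the three returns above suggest the overall shape of rcvbuf_limit(); a reconstruction under that reading (treating FLOWCTL_MSG_LIM as this file's legacy message-counting limit):

	static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
	{
		struct tipc_sock *tsk = tipc_sk(sk);
		struct tipc_msg *hdr = buf_msg(skb);

		/* Group traffic: one fixed limit */
		if (unlikely(msg_in_group(hdr)))
			return READ_ONCE(sk->sk_rcvbuf);

		/* Connectionless: scale the limit with message importance */
		if (unlikely(!msg_connected(hdr)))
			return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);

		/* Connected peer with block flow control: exact byte limit */
		if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
			return READ_ONCE(sk->sk_rcvbuf);

		/* Legacy message-based flow control */
		return FLOWCTL_MSG_LIM;
	}
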
2322 * tipc_sk_filter_rcv - validate incoming message
2337 struct tipc_group *grp = tsk->group;
2345 TIPC_SKB_CB(skb)->bytes_read = 0;
2356 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2366 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2382 __skb_queue_tail(&sk->sk_receive_queue, skb);
2386 sk->sk_data_ready(sk);
2391 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2406 added = sk_rmem_alloc_get(sk) - before;
2407 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2415 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2447 /* Try backlog, compensating for double-counted bytes */
2448 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2449 if (!sk->sk_backlog.len)
2472 * tipc_sk_rcv - handle a chain of incoming buffers
2493 sk = &tsk->sk;
2494 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2496 spin_unlock_bh(&sk->sk_lock.slock);
2527 struct sock *sk = sock->sk;
2535 return -ETIMEDOUT;
2538 if (sk->sk_state == TIPC_DISCONNECTING)
2551 if (addr->family != AF_TIPC)
2553 if (addr->addrtype == TIPC_SERVICE_RANGE)
2554 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2555 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2556 addr->addrtype == TIPC_SOCKET_ADDR);
2560 * tipc_connect - establish a connection to another TIPC port
2564 * @flags: file-related flags associated with socket
2571 struct sock *sk = sock->sk;
2575 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2580 return -EINVAL;
2584 if (tsk->group) {
2585 res = -EINVAL;
2589 if (dst->family == AF_UNSPEC) {
2590 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2592 res = -EINVAL;
2596 res = -EINVAL;
2601 memcpy(&tsk->peer, dest, destlen);
2603 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2604 res = -EINVAL;
2608 previous = sk->sk_state;
2610 switch (sk->sk_state) {
2612 /* Send a 'SYN-' to destination */
2617 /* If connect is in non-blocking case, set MSG_DONTWAIT to
2624 if ((res < 0) && (res != -EWOULDBLOCK))
2628 * difference is that return value in non-blocking
2631 res = -EINPROGRESS;
2636 res = -EALREADY;
2644 res = -EISCONN;
2647 res = -EINVAL;
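
The userspace trigger for this state machine; on a non-blocking socket the call comes back with EINPROGRESS, as handled above. A sketch with the same assumptions as the earlier examples:

	struct sockaddr_tipc peer = {
		.family = AF_TIPC,
		.addrtype = TIPC_SERVICE_ADDR,
		.addr.name.name = { .type = 18888, .instance = 17 },
	};

	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");
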
2656 * tipc_listen - allow socket to listen for incoming connections
2664 struct sock *sk = sock->sk;
2676 struct sock *sk = sock->sk;
2680 /* True wake-one mechanism for incoming connections: only
2686 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2694 if (!skb_queue_empty(&sk->sk_receive_queue))
2696 err = -EAGAIN;
2707 * tipc_accept - wait for connection request
2717 struct sock *new_sk, *sk = sock->sk;
2727 if (sk->sk_state != TIPC_LISTEN) {
2728 res = -EINVAL;
2731 timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
2736 buf = skb_peek(&sk->sk_receive_queue);
2738 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, arg->kern);
2741 security_sk_clone(sock->sk, new_sock->sk);
2743 new_sk = new_sock->sk;
2761 new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
2762 msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
2763 msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
2767 * Respond to 'SYN-' by discarding it & returning 'ACK'.
2773 __skb_dequeue(&sk->sk_receive_queue);
2774 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2786 * tipc_shutdown - shutdown socket connection
2796 struct sock *sk = sock->sk;
2800 return -EINVAL;
2806 sk->sk_shutdown = SHUTDOWN_MASK;
2808 if (sk->sk_state == TIPC_DISCONNECTING) {
2810 __skb_queue_purge(&sk->sk_receive_queue);
2814 res = -ENOTCONN;
2817 sk->sk_state_change(sk);
2830 u32 oport = tsk->portid;
2833 if (tsk->probe_unacked) {
2835 sk->sk_err = ECONNABORTED;
2837 sk->sk_state_change(sk);
2845 tsk->probe_unacked = true;
2846 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2854 if (tsk->cong_link_cnt) {
2855 sk_reset_timer(sk, &sk->sk_timer,
2860 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2876 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2882 if (sk->sk_state == TIPC_ESTABLISHED)
2884 else if (sk->sk_state == TIPC_CONNECTING)
2890 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2893 if (rc == -ELINKCONG) {
2894 tipc_dest_push(&tsk->cong_links, pnode, 0);
2895 tsk->cong_link_cnt = 1;
2902 struct sock *sk = &tsk->sk;
2909 return -EINVAL;
2910 key = tsk->portid + tsk->pub_count + 1;
2911 if (key == tsk->portid)
2912 return -EADDRINUSE;
2913 skaddr.ref = tsk->portid;
2917 return -EINVAL;
2919 list_add(&p->binding_sock, &tsk->publications);
2920 tsk->pub_count++;
2921 tsk->published = true;
2927 struct net *net = sock_net(&tsk->sk);
2930 int rc = -EINVAL;
2932 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2934 tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
2935 p->sr.type, p->sr.lower, p->sr.upper);
2936 tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
2940 if (p->scope != ua->scope)
2942 if (p->sr.type != ua->sr.type)
2944 if (p->sr.lower != ua->sr.lower)
2946 if (p->sr.upper != ua->sr.upper)
2948 tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
2952 if (list_empty(&tsk->publications)) {
2953 tsk->published = 0;
2959 /* tipc_sk_reinit: set non-zero address in all existing sockets
2969 rhashtable_walk_enter(&tn->sk_rht, &iter);
2975 sock_hold(&tsk->sk);
2977 lock_sock(&tsk->sk);
2978 msg = &tsk->phdr;
2981 release_sock(&tsk->sk);
2983 sock_put(&tsk->sk);
2987 } while (tsk == ERR_PTR(-EAGAIN));
2998 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
3000 sock_hold(&tsk->sk);
3008 struct sock *sk = &tsk->sk;
3011 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3014 while (remaining--) {
3018 tsk->portid = portid;
3019 sock_hold(&tsk->sk);
3020 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3023 sock_put(&tsk->sk);
3026 return -1;
3031 struct sock *sk = &tsk->sk;
3034 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3035 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
3054 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
3064 rhashtable_destroy(&tn->sk_rht);
3069 struct net *net = sock_net(&tsk->sk);
3070 struct tipc_group *grp = tsk->group;
3071 struct tipc_msg *hdr = &tsk->phdr;
3075 if (mreq->type < TIPC_RESERVED_TYPES)
3076 return -EACCES;
3077 if (mreq->scope > TIPC_NODE_SCOPE)
3078 return -EINVAL;
3079 if (mreq->scope != TIPC_NODE_SCOPE)
3080 mreq->scope = TIPC_CLUSTER_SCOPE;
3082 return -EACCES;
3083 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3085 return -ENOMEM;
3086 tsk->group = grp;
3087 msg_set_lookup_scope(hdr, mreq->scope);
3088 msg_set_nametype(hdr, mreq->type);
3090 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
3091 mreq->type, mreq->instance, mreq->instance);
3096 tsk->group = NULL;
3100 tsk->mc_method.rcast = true;
3101 tsk->mc_method.mandatory = true;
3102 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
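
This join path is driven from userspace through the TIPC_GROUP_JOIN socket option with a struct tipc_group_req from <linux/tipc.h>; a hedged sketch (group/member identities arbitrary):

	struct tipc_group_req mreq = {
		.type = 18888,			/* group identity */
		.instance = 42,			/* member identity */
		.scope = TIPC_CLUSTER_SCOPE,
		.flags = TIPC_GROUP_LOOPBACK,	/* also deliver own multicasts */
	};

	if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq)) < 0)
		perror("TIPC_GROUP_JOIN");
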
3108 struct net *net = sock_net(&tsk->sk);
3109 struct tipc_group *grp = tsk->group;
3114 return -EINVAL;
3119 tsk->group = NULL;
3125 * tipc_setsockopt - set socket option
3140 struct sock *sk = sock->sk;
3146 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3149 return -ENOPROTOOPT;
3158 return -EINVAL;
3160 return -EFAULT;
3164 return -EINVAL;
3166 return -EFAULT;
3170 return -EINVAL;
3180 if (sock->type != SOCK_STREAM)
3183 res = -ENOPROTOOPT;
3189 tipc_sk(sk)->conn_timeout = value;
3192 tsk->mc_method.rcast = false;
3193 tsk->mc_method.mandatory = true;
3196 tsk->mc_method.rcast = true;
3197 tsk->mc_method.mandatory = true;
3206 tsk->nodelay = !!value;
3210 res = -EINVAL;
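
Two of the options handled above, as set from userspace (a sketch; the values are examples):

	__u32 tmo = 10000;	/* TIPC_CONN_TIMEOUT: connect/probe timeout, ms */
	__u32 nd = 1;		/* TIPC_NODELAY: defeat Nagle batching in tipc_sendstream() */

	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
	setsockopt(sd, SOL_TIPC, TIPC_NODELAY, &nd, sizeof(nd));
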
3219 * tipc_getsockopt - get socket option
3234 struct sock *sk = sock->sk;
3241 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3244 return -ENOPROTOOPT;
3262 value = tsk->conn_timeout;
3269 value = skb_queue_len(&sk->sk_receive_queue);
3276 if (tsk->group)
3277 tipc_group_self(tsk->group, &seq, &scope);
3281 res = -EINVAL;
3290 return -EINVAL;
3293 return -EFAULT;
3300 struct net *net = sock_net(sock->sk);
3308 return -EFAULT;
3313 return -EFAULT;
3316 return -EADDRNOTAVAIL;
3319 return -EFAULT;
3321 return -EADDRNOTAVAIL;
3323 return -EFAULT;
3326 return -ENOIOCTLCMD;
3332 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3333 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3334 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3336 tsk1->peer.family = AF_TIPC;
3337 tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
3338 tsk1->peer.scope = TIPC_NODE_SCOPE;
3339 tsk1->peer.addr.id.ref = tsk2->portid;
3340 tsk1->peer.addr.id.node = onode;
3341 tsk2->peer.family = AF_TIPC;
3342 tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
3343 tsk2->peer.scope = TIPC_NODE_SCOPE;
3344 tsk2->peer.addr.id.ref = tsk1->portid;
3345 tsk2->peer.addr.id.node = onode;
3347 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3348 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
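
The wiring above is what makes an AF_TIPC socketpair come up pre-connected on the own node; a minimal sketch:

	int sv[2];

	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) < 0)
		perror("socketpair(AF_TIPC)");
	/* sv[0] and sv[1] are now mutually connected TIPC sockets */
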
3428 * tipc_socket_init - initialize TIPC socket interface
3453 * tipc_socket_stop - stop TIPC socket interface
3470 conn_type = msg_nametype(&tsk->phdr);
3471 conn_instance = msg_nameinst(&tsk->phdr);
3474 return -EMSGSIZE;
3481 if (tsk->conn_addrtype != 0) {
3496 return -EMSGSIZE;
3502 struct net *net = sock_net(skb->sk);
3503 struct sock *sk = &tsk->sk;
3505 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3507 return -EMSGSIZE;
3511 return -EMSGSIZE;
3512 } else if (!list_empty(&tsk->publications)) {
3514 return -EMSGSIZE;
3526 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3548 return -EMSGSIZE;
3556 struct rhashtable_iter *iter = (void *)cb->args[4];
3563 if (PTR_ERR(tsk) == -EAGAIN)
3568 sock_hold(&tsk->sk);
3570 lock_sock(&tsk->sk);
3573 release_sock(&tsk->sk);
3574 sock_put(&tsk->sk);
3577 release_sock(&tsk->sk);
3579 sock_put(&tsk->sk);
3583 return skb->len;
3589 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3595 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3596 struct rhashtable_iter *iter = (void *)cb->args[4];
3602 return -ENOMEM;
3604 cb->args[4] = (long)iter;
3607 rhashtable_walk_enter(&tn->sk_rht, iter);
3613 struct rhashtable_iter *hti = (void *)cb->args[4];
3625 struct sock *sk = &tsk->sk;
3630 if (!(sk_filter_state & (1 << sk->sk_state)))
3640 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3641 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3644 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3656 skb_queue_len(&sk->sk_receive_queue)) ||
3658 skb_queue_len(&sk->sk_write_queue)) ||
3663 if (tsk->cong_link_cnt &&
3673 if (tsk->group)
3674 if (tipc_group_fill_sock_diag(tsk->group, skb))
3686 return -EMSGSIZE;
3703 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3712 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3714 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
3716 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
3718 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
3731 return -EMSGSIZE;
3743 list_for_each_entry(p, &tsk->publications, binding_sock) {
3744 if (p->key == *last_publ)
3747 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3754 cb->prev_seq = 1;
3756 return -EPIPE;
3759 p = list_first_entry(&tsk->publications, struct publication,
3763 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3766 *last_publ = p->key;
3778 u32 tsk_portid = cb->args[0];
3779 u32 last_publ = cb->args[1];
3780 u32 done = cb->args[2];
3781 struct net *net = sock_net(skb->sk);
3785 struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
3789 return -EINVAL;
3798 return -EINVAL;
3808 return -EINVAL;
3810 lock_sock(&tsk->sk);
3814 release_sock(&tsk->sk);
3815 sock_put(&tsk->sk);
3817 cb->args[0] = tsk_portid;
3818 cb->args[1] = last_publ;
3819 cb->args[2] = done;
3821 return skb->len;
3825 * tipc_sk_filtering - check if a socket should be traced
3857 return (_port == tsk->portid);
3859 if (_sktype && _sktype != sk->sk_type)
3862 if (tsk->published) {
3863 p = list_first_entry_or_null(&tsk->publications,
3866 type = p->sr.type;
3867 lower = p->sr.lower;
3868 upper = p->sr.upper;
3873 type = msg_nametype(&tsk->phdr);
3874 lower = msg_nameinst(&tsk->phdr);
3887 return (sk) ? (tipc_sk(sk))->portid : 0;
3891 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3901 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3903 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3909 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3926 * tipc_sk_dump - dump TIPC socket
3929 * - TIPC_DUMP_NONE: don't dump socket queues
3930 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3931 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3932 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3933 * - TIPC_DUMP_ALL: dump all the socket queues above
3953 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3954 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3955 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3956 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3957 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3959 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3960 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3961 conn_type = msg_nametype(&tsk->phdr);
3962 conn_instance = msg_nameinst(&tsk->phdr);
3963 i += scnprintf(buf + i, sz - i, " %u", conn_type);
3964 i += scnprintf(buf + i, sz - i, " %u", conn_instance);
3966 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3967 if (tsk->published) {
3968 p = list_first_entry_or_null(&tsk->publications,
3970 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3971 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3972 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
3974 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3975 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3976 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3977 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3978 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3979 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3980 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3981 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3982 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3983 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3984 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3985 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3986 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3987 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3990 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3991 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3995 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3996 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
4000 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
4001 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
4002 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4003 i += scnprintf(buf + i, sz - i, " tail ");
4004 i += tipc_skb_dump(sk->sk_backlog.tail, false,