Lines matching "retain-state-shutdown" (net/rds/connection.c)
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
46 #define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
65 lhash = (__force u32)laddr->s6_addr32[3];
69 fhash = (__force u32)faddr->s6_addr32[3];
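The RDS_CONNECTION_HASH_MASK definition and the lhash/fhash words above show how the local/peer address pair of a connection is folded into a power-of-two bucket index. The userspace sketch below illustrates only that bucketing pattern; the table size, the hash mix, and the conn_bucket()/HASH_ENTRIES names are assumptions for illustration, not the kernel's rds_conn_bucket(), which also mixes in a per-boot random secret.

/* Simplified, userspace-only sketch of power-of-two bucket selection.
 * Names and the hash mix are illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS     6
#define HASH_ENTRIES  (1u << HASH_BITS)      /* must be a power of two */
#define HASH_MASK     (HASH_ENTRIES - 1)     /* cheap modulo via AND   */

static unsigned int conn_bucket(uint32_t laddr, uint32_t faddr)
{
	/* Mix the two 32-bit address words; any decent integer hash works. */
	uint32_t h = laddr ^ faddr;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;

	return h & HASH_MASK;                /* index into the bucket table */
}

int main(void)
{
	/* 192.0.2.1 -> 198.51.100.7, both as host-order 32-bit words */
	printf("bucket %u of %u\n",
	       conn_bucket(0xc0000201u, 0xc6336407u), HASH_ENTRIES);
	return 0;
}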
92 if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
93 ipv6_addr_equal(&conn->c_laddr, laddr) &&
94 conn->c_trans == trans &&
95 conn->c_tos == tos &&
97 conn->c_dev_if == dev_if) {
102 rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
109 * It clears partial message state so that the transport can start sending
115 struct rds_connection *conn = cp->cp_conn;
118 &conn->c_laddr, &conn->c_faddr);
122 cp->cp_flags = 0;
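The comment fragment above ("It clears partial message state so that the transport can start sending") describes what a connection-path reset has to do before a reconnect. A minimal userspace sketch of that idea follows; the struct layout and the field names (partial_msg, flags, next_tx_seq) are illustrative assumptions, not the kernel's rds_conn_path or rds_conn_path_reset() definitions.

/* Userspace sketch of "reset a connection path before reconnecting":
 * drop any partially transmitted message and clear transient flag bits
 * so a fresh connection starts from a clean slate. */
#include <stdint.h>
#include <stdlib.h>

struct msg {
	int refcount;
	/* payload omitted */
};

struct conn_path {
	struct msg   *partial_msg;   /* message we were in the middle of sending */
	unsigned long flags;         /* transient bits such as "transmit in progress" */
	uint64_t      next_tx_seq;   /* kept across resets so sequence numbers advance */
};

static void msg_put(struct msg *m)
{
	if (m && --m->refcount == 0)
		free(m);
}

static void conn_path_reset(struct conn_path *cp)
{
	/* Forget the half-sent message; it will be retransmitted from the
	 * send queue once the new connection is up. */
	msg_put(cp->partial_msg);
	cp->partial_msg = NULL;

	/* Clear per-attempt state, but keep counters that must survive a
	 * reconnect (e.g. the next TX sequence number). */
	cp->flags = 0;
}

int main(void)
{
	struct conn_path cp = { .partial_msg = NULL, .flags = 0x1, .next_tx_seq = 42 };

	conn_path_reset(&cp);
	return (cp.flags == 0 && cp.next_tx_seq == 42) ? 0 : 1;
}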
133 spin_lock_init(&cp->cp_lock);
134 cp->cp_next_tx_seq = 1;
135 init_waitqueue_head(&cp->cp_waitq);
136 INIT_LIST_HEAD(&cp->cp_send_queue);
137 INIT_LIST_HEAD(&cp->cp_retrans);
139 cp->cp_conn = conn;
140 atomic_set(&cp->cp_state, RDS_CONN_DOWN);
141 cp->cp_send_gen = 0;
142 cp->cp_reconnect_jiffies = 0;
143 cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
144 INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
145 INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
146 INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
147 INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
148 mutex_init(&cp->cp_cm_lock);
149 cp->cp_flags = 0;
173 int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
178 conn->c_loopback &&
179 conn->c_trans != &rds_loop_transport &&
187 conn = parent->c_passive;
195 conn = ERR_PTR(-ENOMEM);
198 conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
199 if (!conn->c_path) {
201 conn = ERR_PTR(-ENOMEM);
205 INIT_HLIST_NODE(&conn->c_hash_node);
206 conn->c_laddr = *laddr;
207 conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
208 conn->c_faddr = *faddr;
209 conn->c_dev_if = dev_if;
210 conn->c_tos = tos;
219 conn->c_bound_if = dev_if;
222 conn->c_bound_if = 0;
228 kfree(conn->c_path);
239 loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
242 conn->c_loopback = 1;
243 if (trans->t_prefer_loopback) {
256 kfree(conn->c_path);
258 conn = ERR_PTR(-EOPNOTSUPP);
264 conn->c_trans = trans;
266 init_waitqueue_head(&conn->c_hs_waitq);
268 __rds_conn_path_init(conn, &conn->c_path[i],
270 conn->c_path[i].cp_index = i;
274 ret = -ENETDOWN;
276 ret = trans->conn_alloc(conn, GFP_ATOMIC);
279 kfree(conn->c_path);
285 rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
287 strnlen(trans->t_name, sizeof(trans->t_name)) ?
288 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");
300 if (parent->c_passive) {
301 trans->conn_free(conn->c_path[0].cp_transport_data);
302 kfree(conn->c_path);
304 conn = parent->c_passive;
306 parent->c_passive = conn;
321 cp = &conn->c_path[i];
322 /* The ->conn_alloc invocation may have
326 if (cp->cp_transport_data)
327 trans->conn_free(cp->cp_transport_data);
329 kfree(conn->c_path);
333 conn->c_my_gen_num = rds_gen_num;
334 conn->c_peer_gen_num = 0;
335 hlist_add_head_rcu(&conn->c_hash_node, head);
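The error-path fragments above (the loop over the paths, the cp_transport_data check, conn_free(), and the final kfree of c_path) follow the usual "undo a partially completed allocation" pattern. Below is a self-contained userspace sketch of that pattern, assuming hypothetical transport_alloc()/transport_free() helpers; it is not the kernel code itself.

/* Sketch of undoing a partial allocation: free per-path transport data
 * only for the paths that actually got it, then free the path array. */
#include <stdlib.h>

struct conn_path {
	void *transport_data;
};

static void *transport_alloc(void)   { return malloc(64); }
static void  transport_free(void *p) { free(p); }

static struct conn_path *alloc_paths(int npaths)
{
	struct conn_path *paths = calloc(npaths, sizeof(*paths));
	int i;

	if (!paths)
		return NULL;

	for (i = 0; i < npaths; i++) {
		paths[i].transport_data = transport_alloc();
		if (!paths[i].transport_data)
			goto fail;
	}
	return paths;

fail:
	/* Walk every slot: earlier iterations may have succeeded even
	 * though a later one failed, so check before freeing. */
	for (i = 0; i < npaths; i++)
		if (paths[i].transport_data)
			transport_free(paths[i].transport_data);
	free(paths);
	return NULL;
}

int main(void)
{
	struct conn_path *paths = alloc_paths(8);

	if (paths) {
		for (int i = 0; i < 8; i++)
			transport_free(paths[i].transport_data);
		free(paths);
	}
	return 0;
}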
369 struct rds_connection *conn = cp->cp_conn;
376 * duration of the shutdown operation, else we may be
378 * handler is supposed to check for state DISCONNECTING
380 mutex_lock(&cp->cp_cm_lock);
386 "shutdown called in state %d\n",
387 atomic_read(&cp->cp_state));
388 mutex_unlock(&cp->cp_cm_lock);
391 mutex_unlock(&cp->cp_cm_lock);
393 wait_event(cp->cp_waitq,
394 !test_bit(RDS_IN_XMIT, &cp->cp_flags));
395 wait_event(cp->cp_waitq,
396 !test_bit(RDS_RECV_REFILL, &cp->cp_flags));
398 conn->c_trans->conn_path_shutdown(cp);
405 /* This can happen - eg when we're in the middle of tearing
410 * Note that this also happens with rds-tcp because
417 "to state DOWN, current state "
419 atomic_read(&cp->cp_state));
427 * conn - the reconnect is always triggered by the active peer. */
428 cancel_delayed_work_sync(&cp->cp_conn_w);
430 if (!hlist_unhashed(&conn->c_hash_node)) {
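The shutdown fragments above carry the interesting reasoning: the state change is made while holding cp_cm_lock so a racing connection-manager event handler sees DISCONNECTING and backs off, the code then waits for the in-flight transmit and receive-refill bits to clear, and the final DISCONNECTING -> DOWN move is allowed to fail because a concurrent drop may already have pushed the path to ERROR. The sketch below walks through that sequence with C11 atomics and a pthread mutex; the state names, the bit layout, and path_shutdown()/path_transition() are illustrative stand-ins, not the RDS implementation.

/* Userspace sketch of the shutdown sequencing described above:
 * 1) transition UP -> DISCONNECTING under a lock so concurrent event
 *    handlers observe DISCONNECTING and back off,
 * 2) wait for in-flight work (transmit / receive-refill bits) to drain,
 * 3) move DISCONNECTING -> DOWN, tolerating a racing move to ERROR. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum path_state { PS_DOWN, PS_CONNECTING, PS_UP, PS_DISCONNECTING, PS_ERROR };

struct conn_path {
	pthread_mutex_t cm_lock;   /* serializes state changes vs. CM events */
	_Atomic int     state;
	atomic_ulong    flags;     /* bit 0: transmit in flight, bit 1: refill */
};

/* Move old -> new atomically; returns false if someone changed the state. */
static bool path_transition(struct conn_path *cp, int old, int new)
{
	return atomic_compare_exchange_strong(&cp->state, &old, new);
}

static void path_shutdown(struct conn_path *cp)
{
	pthread_mutex_lock(&cp->cm_lock);
	if (!path_transition(cp, PS_UP, PS_DISCONNECTING)) {
		/* Not connected (or already being torn down): nothing to do. */
		pthread_mutex_unlock(&cp->cm_lock);
		return;
	}
	pthread_mutex_unlock(&cp->cm_lock);

	/* Wait until no transmit or receive refill is in flight. */
	while (atomic_load(&cp->flags) & 0x3UL)
		sched_yield();

	/* transport-specific teardown would run here */

	/* A racing error path may already have moved us to ERROR; that is
	 * fine, the reconnect logic handles it, so don't treat it as fatal. */
	if (!path_transition(cp, PS_DISCONNECTING, PS_DOWN))
		fprintf(stderr, "shutdown raced with error, state=%d\n",
			atomic_load(&cp->state));
}

int main(void)
{
	struct conn_path cp = { .cm_lock = PTHREAD_MUTEX_INITIALIZER,
				.state = PS_UP, .flags = 0 };

	path_shutdown(&cp);
	printf("final state=%d\n", atomic_load(&cp.state));
	return 0;
}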
445 if (!cp->cp_transport_data)
449 cancel_delayed_work_sync(&cp->cp_send_w);
450 cancel_delayed_work_sync(&cp->cp_recv_w);
453 flush_work(&cp->cp_down_w);
457 &cp->cp_send_queue,
459 list_del_init(&rm->m_conn_item);
460 BUG_ON(!list_empty(&rm->m_sock_item));
463 if (cp->cp_xmit_rm)
464 rds_message_put(cp->cp_xmit_rm);
466 WARN_ON(delayed_work_pending(&cp->cp_send_w));
467 WARN_ON(delayed_work_pending(&cp->cp_recv_w));
468 WARN_ON(delayed_work_pending(&cp->cp_conn_w));
469 WARN_ON(work_pending(&cp->cp_down_w));
471 cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
478 * the conn has been shutdown that no one else is referencing the connection.
486 int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
488 rdsdebug("freeing conn %p for %pI4 -> "
489 "%pI4\n", conn, &conn->c_laddr,
490 &conn->c_faddr);
494 hlist_del_init_rcu(&conn->c_hash_node);
500 cp = &conn->c_path[i];
502 BUG_ON(!list_empty(&cp->cp_retrans));
512 kfree(conn->c_path);
516 rds_conn_count--;
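The destroy fragments above rely on the precondition spelled out in the comment at line 478: once the connection has been shut down, the caller guarantees nobody else references it, and the object is unhashed before its resources are released. A simplified userspace sketch of that "unlink first, then free" ordering follows; it uses one mutex and a plain singly linked list where the kernel uses an RCU-protected hash list, and conn_destroy()/conn_list are illustrative names.

/* Sketch of the ordering implied above: remove the connection from the
 * lookup structure under a lock so no new reference can be taken, and
 * only then release its resources. */
#include <pthread.h>
#include <stdlib.h>

struct conn {
	struct conn *next;
	/* per-connection resources omitted */
};

static struct conn     *conn_list;
static pthread_mutex_t  conn_lock = PTHREAD_MUTEX_INITIALIZER;

static void conn_destroy(struct conn *conn)
{
	struct conn **pp;

	/* 1) Make the connection unreachable for new lookups. */
	pthread_mutex_lock(&conn_lock);
	for (pp = &conn_list; *pp; pp = &(*pp)->next) {
		if (*pp == conn) {
			*pp = conn->next;
			break;
		}
	}
	pthread_mutex_unlock(&conn_lock);

	/* 2) Now that no one else can find it (and the caller guarantees
	 *    existing users are gone), it is safe to free everything. */
	free(conn);
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->next = conn_list;
	conn_list = c;

	conn_destroy(c);
	return 0;
}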
561 if (!isv6 && conn->c_isv6)
564 npaths = (conn->c_trans->t_mp_capable ?
568 cp = &conn->c_path[j];
570 list = &cp->cp_send_queue;
572 list = &cp->cp_retrans;
574 spin_lock_irqsave(&cp->cp_lock, flags);
580 __rds_inc_msg_cp(&rm->m_inc,
582 &conn->c_laddr,
583 &conn->c_faddr,
587 spin_unlock_irqrestore(&cp->cp_lock, flags);
593 lens->nr = total;
595 lens->each = sizeof(struct rds6_info_message);
597 lens->each = sizeof(struct rds_info_message);
665 lens->nr = 0;
666 lens->each = item_len;
681 len -= item_len;
683 lens->nr++;
703 lens->nr = 0;
704 lens->each = item_len;
719 cp = conn->c_path;
731 len -= item_len;
733 lens->nr++;
742 struct rds_connection *conn = cp->cp_conn;
744 if (conn->c_isv6)
747 cinfo->next_tx_seq = cp->cp_next_tx_seq;
748 cinfo->next_rx_seq = cp->cp_next_rx_seq;
749 cinfo->laddr = conn->c_laddr.s6_addr32[3];
750 cinfo->faddr = conn->c_faddr.s6_addr32[3];
751 cinfo->tos = conn->c_tos;
752 strscpy_pad(cinfo->transport, conn->c_trans->t_name);
753 cinfo->flags = 0;
755 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
757 /* XXX Future: return the state rather than these funky bits */
758 rds_conn_info_set(cinfo->flags,
759 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
761 rds_conn_info_set(cinfo->flags,
762 atomic_read(&cp->cp_state) == RDS_CONN_UP,
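The rds_conn_info_set() calls above translate individual conditions (transmit in progress, connecting, connected) into bits of the flags word reported to userspace, which is exactly what the "XXX Future" comment would like to replace with a real state value. The sketch below shows how such a conditional flag-set helper can look; the macro body and the INFO_FLAG_* values are assumptions modeled on the visible call sites, not the kernel's definition.

/* Sketch of a conditional flag-setting helper in the style of the
 * rds_conn_info_set() calls above: each boolean condition maps to one
 * bit in a flags word handed to userspace. Bit values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define INFO_FLAG_SENDING     0x01
#define INFO_FLAG_CONNECTING  0x02
#define INFO_FLAG_CONNECTED   0x04

#define conn_info_set(var, test, flag)	\
	do {				\
		if (test)		\
			(var) |= (flag);\
	} while (0)

int main(void)
{
	uint8_t flags = 0;
	int state_connecting = 0, state_up = 1, in_xmit = 1;

	conn_info_set(flags, in_xmit,          INFO_FLAG_SENDING);
	conn_info_set(flags, state_connecting, INFO_FLAG_CONNECTING);
	conn_info_set(flags, state_up,         INFO_FLAG_CONNECTED);

	printf("flags = 0x%02x\n", flags);   /* 0x05: sending + connected */
	return 0;
}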
771 struct rds_connection *conn = cp->cp_conn;
773 cinfo6->next_tx_seq = cp->cp_next_tx_seq;
774 cinfo6->next_rx_seq = cp->cp_next_rx_seq;
775 cinfo6->laddr = conn->c_laddr;
776 cinfo6->faddr = conn->c_faddr;
777 strscpy_pad(cinfo6->transport, conn->c_trans->t_name);
778 cinfo6->flags = 0;
780 rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
782 /* XXX Future: return the state rather than these funky bits */
783 rds_conn_info_set(cinfo6->flags,
784 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
786 rds_conn_info_set(cinfo6->flags,
787 atomic_read(&cp->cp_state) == RDS_CONN_UP,
833 return -ENOMEM;
879 atomic_set(&cp->cp_state, RDS_CONN_ERROR);
882 if (!destroy && rds_destroy_pending(cp->cp_conn)) {
886 queue_work(rds_wq, &cp->cp_down_w);
893 WARN_ON(conn->c_trans->t_mp_capable);
894 rds_conn_path_drop(&conn->c_path[0], false);
900 * delayed reconnect however - in this case we should not interfere.
905 if (rds_destroy_pending(cp->cp_conn)) {
910 !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
911 queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
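The fragments above show how a reconnect attempt is gated: nothing is queued if the connection is pending destruction, and a test_and_set_bit() on RDS_RECONNECT_PENDING ensures the delayed connect worker is queued at most once (with the comment at line 427 adding that only the active peer triggers reconnects at all). The userspace sketch below reproduces just that gating pattern; queue_reconnect_work(), the flag value, and the destroy_pending field are hypothetical stand-ins.

/* Sketch of the reconnect gating visible above: skip the reconnect when
 * the connection is being destroyed, and use an atomic test-and-set of a
 * "reconnect pending" bit so the work is queued at most once. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_RECONNECT_PENDING  0x1UL

struct conn_path {
	atomic_ulong flags;
	atomic_bool  destroy_pending;
};

static void queue_reconnect_work(struct conn_path *cp)
{
	(void)cp;
	puts("reconnect work queued");
}

static void connect_if_down(struct conn_path *cp)
{
	/* The connection is going away: starting a reconnect now would
	 * only race with the teardown, so do nothing. */
	if (atomic_load(&cp->destroy_pending))
		return;

	/* Only the first caller to set the bit queues the work; later
	 * callers see the bit already set and back off. */
	if (!(atomic_fetch_or(&cp->flags, FLAG_RECONNECT_PENDING) &
	      FLAG_RECONNECT_PENDING))
		queue_reconnect_work(cp);
}

int main(void)
{
	struct conn_path cp = { .flags = 0, .destroy_pending = false };

	connect_if_down(&cp);   /* queues the work        */
	connect_if_down(&cp);   /* bit already set: no-op */
	return 0;
}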
923 rds_conn_path_connect_if_down(&conn->c_path[i]);
924 } while (++i < conn->c_npaths);
929 WARN_ON(conn->c_trans->t_mp_capable);
930 rds_conn_path_connect_if_down(&conn->c_path[0]);