Lines matching the full-word tokens "no", "-", "memory", "-", "wc" (query: +full:no +full:- +full:memory +full:- +full:wc). Every hit below comes from the Linux kernel's net/rds/ib_cm.c (RDS-over-InfiniBand connection management); the leading number on each line is the source line in that file, and the trailing "in function()" names the enclosing function.

14  *      - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
51 conn->c_version = version; in rds_ib_set_protocol()
59 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_set_flow_control()
63 ic->i_flowctl = 1; in rds_ib_set_flow_control()
66 ic->i_flowctl = 0; in rds_ib_set_flow_control()
76 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_connect_complete()
84 dp = event->param.conn.private_data; in rds_ib_cm_connect_complete()
85 if (conn->c_isv6) { in rds_ib_cm_connect_complete()
86 if (event->param.conn.private_data_len >= in rds_ib_cm_connect_complete()
88 major = dp->ricp_v6.dp_protocol_major; in rds_ib_cm_connect_complete()
89 minor = dp->ricp_v6.dp_protocol_minor; in rds_ib_cm_connect_complete()
90 credit = dp->ricp_v6.dp_credit; in rds_ib_cm_connect_complete()
92 * aligned. Since dp_ack_seq is 64-bit extended load in rds_ib_cm_connect_complete()
96 ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq); in rds_ib_cm_connect_complete()
98 } else if (event->param.conn.private_data_len >= in rds_ib_cm_connect_complete()
100 major = dp->ricp_v4.dp_protocol_major; in rds_ib_cm_connect_complete()
101 minor = dp->ricp_v4.dp_protocol_minor; in rds_ib_cm_connect_complete()
102 credit = dp->ricp_v4.dp_credit; in rds_ib_cm_connect_complete()
103 ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq); in rds_ib_cm_connect_complete()
112 if (conn->c_version < RDS_PROTOCOL_VERSION) { in rds_ib_cm_connect_complete()
113 if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) { in rds_ib_cm_connect_complete()
114 pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n", in rds_ib_cm_connect_complete()
115 &conn->c_laddr, &conn->c_faddr, in rds_ib_cm_connect_complete()
116 RDS_PROTOCOL_MAJOR(conn->c_version), in rds_ib_cm_connect_complete()
117 RDS_PROTOCOL_MINOR(conn->c_version)); in rds_ib_cm_connect_complete()
124 ic->i_active_side ? "Active" : "Passive", in rds_ib_cm_connect_complete()
125 &conn->c_laddr, &conn->c_faddr, conn->c_tos, in rds_ib_cm_connect_complete()
126 RDS_PROTOCOL_MAJOR(conn->c_version), in rds_ib_cm_connect_complete()
127 RDS_PROTOCOL_MINOR(conn->c_version), in rds_ib_cm_connect_complete()
128 ic->i_flowctl ? ", flow control" : ""); in rds_ib_cm_connect_complete()
131 ic->i_sl = ic->i_cm_id->route.path_rec->sl; in rds_ib_cm_connect_complete()
133 atomic_set(&ic->i_cq_quiesce, 0); in rds_ib_cm_connect_complete()
141 /* Post receive buffers - as a side effect, this will update in rds_ib_cm_connect_complete()
146 err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr); in rds_ib_cm_connect_complete()
159 conn->c_proposed_version = conn->c_version; in rds_ib_cm_connect_complete()
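The comment matched at line 92 and the load at line 96 explain a subtle point: the CM private_data buffer handed up by the rdma_cm is not guaranteed to be 8-byte aligned, so the 64-bit dp_ack_seq is read through get_unaligned() rather than a plain load, which could trap on strict-alignment CPUs. A minimal sketch of the same pattern, using a hypothetical struct rather than the RDS private-data layout:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical wire format; only the access pattern mirrors the RDS code. */
struct example_priv {
	__be32	proto;
	__be64	ack_seq;	/* the buffer holding this may not be 8-byte aligned */
};

static u64 example_read_ack_seq(const void *private_data)
{
	const struct example_priv *p = private_data;

	/* get_unaligned() falls back to byte-safe loads on strict-alignment CPUs */
	return be64_to_cpu(get_unaligned(&p->ack_seq));
}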
171 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_fill_conn_param()
172 struct rds_ib_device *rds_ibdev = ic->rds_ibdev; in rds_ib_cm_fill_conn_param()
176 conn_param->responder_resources = in rds_ib_cm_fill_conn_param()
177 min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources); in rds_ib_cm_fill_conn_param()
178 conn_param->initiator_depth = in rds_ib_cm_fill_conn_param()
179 min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth); in rds_ib_cm_fill_conn_param()
180 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); in rds_ib_cm_fill_conn_param()
181 conn_param->rnr_retry_count = 7; in rds_ib_cm_fill_conn_param()
186 dp->ricp_v6.dp_saddr = conn->c_laddr; in rds_ib_cm_fill_conn_param()
187 dp->ricp_v6.dp_daddr = conn->c_faddr; in rds_ib_cm_fill_conn_param()
188 dp->ricp_v6.dp_protocol_major = in rds_ib_cm_fill_conn_param()
190 dp->ricp_v6.dp_protocol_minor = in rds_ib_cm_fill_conn_param()
192 dp->ricp_v6.dp_protocol_minor_mask = in rds_ib_cm_fill_conn_param()
194 dp->ricp_v6.dp_ack_seq = in rds_ib_cm_fill_conn_param()
196 dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos; in rds_ib_cm_fill_conn_param()
198 conn_param->private_data = &dp->ricp_v6; in rds_ib_cm_fill_conn_param()
199 conn_param->private_data_len = sizeof(dp->ricp_v6); in rds_ib_cm_fill_conn_param()
201 dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3]; in rds_ib_cm_fill_conn_param()
202 dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3]; in rds_ib_cm_fill_conn_param()
203 dp->ricp_v4.dp_protocol_major = in rds_ib_cm_fill_conn_param()
205 dp->ricp_v4.dp_protocol_minor = in rds_ib_cm_fill_conn_param()
207 dp->ricp_v4.dp_protocol_minor_mask = in rds_ib_cm_fill_conn_param()
209 dp->ricp_v4.dp_ack_seq = in rds_ib_cm_fill_conn_param()
211 dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos; in rds_ib_cm_fill_conn_param()
213 conn_param->private_data = &dp->ricp_v4; in rds_ib_cm_fill_conn_param()
214 conn_param->private_data_len = sizeof(dp->ricp_v4); in rds_ib_cm_fill_conn_param()
218 if (ic->i_flowctl) { in rds_ib_cm_fill_conn_param()
222 (atomic_read(&ic->i_credits)); in rds_ib_cm_fill_conn_param()
224 dp->ricp_v6.dp_credit = cpu_to_be32(credits); in rds_ib_cm_fill_conn_param()
226 dp->ricp_v4.dp_credit = cpu_to_be32(credits); in rds_ib_cm_fill_conn_param()
228 &ic->i_credits); in rds_ib_cm_fill_conn_param()
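Lines 176-181 show rds_ib_cm_fill_conn_param() clamping the rdma_conn_param fields: responder_resources and initiator_depth are bounded by the device limits, and retry_count is capped at 7 because the CM REQ carries it in a 3-bit field (an rnr_retry_count of 7 means "retry indefinitely"). A minimal sketch of that clamping, assuming the caller already has the device attributes and an arbitrary configured retry limit rather than the RDS sysctl:

#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* Illustrative only: a configured retry limit, not the RDS sysctl. */
static unsigned int example_retry_count = 6;

static void example_fill_conn_param(struct rdma_conn_param *conn_param,
				    const struct ib_device_attr *attr)
{
	memset(conn_param, 0, sizeof(*conn_param));

	/* Never ask for more RDMA-read resources than the HCA supports. */
	conn_param->responder_resources =
		min_t(u32, attr->max_qp_rd_atom, 255);
	conn_param->initiator_depth =
		min_t(u32, attr->max_qp_init_rd_atom, 255);

	/* Both retry counters are 3-bit fields in the CM REQ, so cap at 7. */
	conn_param->retry_count = min_t(unsigned int, example_retry_count, 7);
	conn_param->rnr_retry_count = 7;	/* 7 == retry forever */
}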
236 event->event, ib_event_msg(event->event), data); in rds_ib_cq_event_handler()
250 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cq_comp_handler_recv()
256 tasklet_schedule(&ic->i_recv_tasklet); in rds_ib_cq_comp_handler_recv()
263 struct ib_wc *wc; in poll_scq() local
267 wc = wcs + i; in poll_scq()
268 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", in poll_scq()
269 (unsigned long long)wc->wr_id, wc->status, in poll_scq()
270 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_scq()
272 if (wc->wr_id <= ic->i_send_ring.w_nr || in poll_scq()
273 wc->wr_id == RDS_IB_ACK_WR_ID) in poll_scq()
274 rds_ib_send_cqe_handler(ic, wc); in poll_scq()
276 rds_ib_mr_cqe_handler(ic, wc); in poll_scq()
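poll_scq() (lines 263-276) drains send completions in fixed-size batches: ib_poll_cq() fills an array of struct ib_wc, each entry is dispatched by its wr_id, and polling stops once a partial batch comes back. A minimal sketch of that drain loop with a caller-supplied dispatch callback (RDS keeps its ib_wc arrays in the connection struct; the small stack array here is purely illustrative):

#include <rdma/ib_verbs.h>

#define EXAMPLE_WC_BATCH 8	/* illustrative batch size */

static void example_drain_cq(struct ib_cq *cq,
			     void (*dispatch)(struct ib_wc *wc, void *ctx),
			     void *ctx)
{
	struct ib_wc wcs[EXAMPLE_WC_BATCH];
	int nr, i;

	/* A short return means the CQ is (momentarily) empty. */
	while ((nr = ib_poll_cq(cq, EXAMPLE_WC_BATCH, wcs)) > 0) {
		for (i = 0; i < nr; i++)
			dispatch(&wcs[i], ctx);
		if (nr < EXAMPLE_WC_BATCH)
			break;
	}
}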
285 struct rds_connection *conn = ic->conn; in rds_ib_tasklet_fn_send()
290 if (atomic_read(&ic->i_cq_quiesce)) in rds_ib_tasklet_fn_send()
293 poll_scq(ic, ic->i_send_cq, ic->i_send_wc); in rds_ib_tasklet_fn_send()
294 ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); in rds_ib_tasklet_fn_send()
295 poll_scq(ic, ic->i_send_cq, ic->i_send_wc); in rds_ib_tasklet_fn_send()
298 (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || in rds_ib_tasklet_fn_send()
299 test_bit(0, &conn->c_map_queued))) in rds_ib_tasklet_fn_send()
300 rds_send_xmit(&ic->conn->c_path[0]); in rds_ib_tasklet_fn_send()
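Lines 293-295 (mirrored at 339-341 for the receive side) are the classic poll, re-arm, poll-again sequence: after the first drain the CQ is re-armed with ib_req_notify_cq(), then drained once more so a completion that arrived in the unarmed window is handled now instead of waiting for an interrupt that will never fire. Sketched below, reusing example_drain_cq() from the previous sketch:

static void example_cq_bottom_half(struct ib_cq *cq,
				   void (*dispatch)(struct ib_wc *wc, void *ctx),
				   void *ctx)
{
	example_drain_cq(cq, dispatch, ctx);

	/* Re-arm, then poll once more to close the missed-event window. */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	example_drain_cq(cq, dispatch, ctx);
}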
308 struct ib_wc *wc; in poll_rcq() local
312 wc = wcs + i; in poll_rcq()
313 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", in poll_rcq()
314 (unsigned long long)wc->wr_id, wc->status, in poll_rcq()
315 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_rcq()
317 rds_ib_recv_cqe_handler(ic, wc, ack_state); in poll_rcq()
325 struct rds_connection *conn = ic->conn; in rds_ib_tasklet_fn_recv()
326 struct rds_ib_device *rds_ibdev = ic->rds_ibdev; in rds_ib_tasklet_fn_recv()
335 if (atomic_read(&ic->i_cq_quiesce)) in rds_ib_tasklet_fn_recv()
339 poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); in rds_ib_tasklet_fn_recv()
340 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); in rds_ib_tasklet_fn_recv()
341 poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); in rds_ib_tasklet_fn_recv()
345 if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) { in rds_ib_tasklet_fn_recv()
347 ic->i_ack_recv = state.ack_recv; in rds_ib_tasklet_fn_recv()
357 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_qp_event_handler()
359 rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, in rds_ib_qp_event_handler()
360 ib_event_msg(event->event)); in rds_ib_qp_event_handler()
362 switch (event->event) { in rds_ib_qp_event_handler()
364 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); in rds_ib_qp_event_handler()
367 rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n", in rds_ib_qp_event_handler()
368 event->event, ib_event_msg(event->event), in rds_ib_qp_event_handler()
369 &conn->c_laddr, &conn->c_faddr); in rds_ib_qp_event_handler()
378 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cq_comp_handler_send()
384 tasklet_schedule(&ic->i_send_tasklet); in rds_ib_cq_comp_handler_send()
389 int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1]; in ibdev_get_unused_vector()
390 int index = rds_ibdev->dev->num_comp_vectors - 1; in ibdev_get_unused_vector()
393 for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) { in ibdev_get_unused_vector()
394 if (rds_ibdev->vector_load[i] < min) { in ibdev_get_unused_vector()
396 min = rds_ibdev->vector_load[i]; in ibdev_get_unused_vector()
400 rds_ibdev->vector_load[index]++; in ibdev_get_unused_vector()
406 rds_ibdev->vector_load[index]--; in ibdev_put_vector()
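ibdev_get_unused_vector()/ibdev_put_vector() (lines 389-406) spread CQs across the HCA's completion vectors by refcounting: pick the vector with the lowest current load, bump its counter, and decrement it again when the CQ is torn down. A minimal sketch of that bookkeeping with a hypothetical device struct (any locking around the counters is omitted here):

/* Hypothetical per-device state; RDS keeps the real counters in rds_ib_device. */
struct example_dev {
	int num_comp_vectors;	/* from ib_device->num_comp_vectors */
	int vector_load[64];	/* one usage count per completion vector */
};

static int example_get_least_loaded_vector(struct example_dev *dev)
{
	int index = dev->num_comp_vectors - 1;
	int lowest = dev->vector_load[index];
	int i;

	/* Walk all vectors and keep the least-loaded one. */
	for (i = dev->num_comp_vectors - 1; i >= 0; i--) {
		if (dev->vector_load[i] < lowest) {
			lowest = dev->vector_load[i];
			index = i;
		}
	}
	dev->vector_load[index]++;
	return index;
}

static void example_put_vector(struct example_dev *dev, int index)
{
	dev->vector_load[index]--;
}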
435 /* Free the DMA memory used to store struct rds_header.
438 * @hdrs: pointer to the array storing DMA memory pointers
449 rds_dma_hdr_free(dev->dev, hdrs[i], dma_addrs[i], dir); in rds_dma_hdrs_free()
455 /* Allocate DMA coherent memory to be used to store struct rds_header for
456 * sending/receiving packets. The pointers to the DMA memory and the
463 * It returns the pointer to the array storing the DMA memory pointers. On
475 ibdev_to_node(dev->dev)); in rds_dma_hdrs_alloc()
480 ibdev_to_node(dev->dev)); in rds_dma_hdrs_alloc()
487 hdrs[i] = rds_dma_hdr_alloc(dev->dev, &hdr_daddrs[i], dir); in rds_dma_hdrs_alloc()
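rds_dma_hdrs_alloc()/rds_dma_hdrs_free() (lines 435-487) maintain one DMA-mapped struct rds_header per ring slot plus a parallel array of DMA addresses, allocated on the IB device's NUMA node (ibdev_to_node()). The per-header helper rds_dma_hdr_alloc() itself is not among the matches; below is a minimal sketch of what such a helper can look like, assuming kzalloc_node() plus ib_dma_map_single() rather than whatever the real helper uses:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct example_hdr {		/* stand-in for struct rds_header */
	u8 bytes[48];
};

static struct example_hdr *example_hdr_alloc(struct ib_device *dev,
					     dma_addr_t *dma_addr,
					     enum dma_data_direction dir)
{
	struct example_hdr *hdr;

	/* Allocate close to the HCA to keep DMA and cache traffic local. */
	hdr = kzalloc_node(sizeof(*hdr), GFP_KERNEL, ibdev_to_node(dev));
	if (!hdr)
		return NULL;

	*dma_addr = ib_dma_map_single(dev, hdr, sizeof(*hdr), dir);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
		kfree(hdr);
		return NULL;
	}
	return hdr;
}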
504 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_setup_qp()
505 struct ib_device *dev = ic->i_cm_id->device; in rds_ib_setup_qp()
518 return -EOPNOTSUPP; in rds_ib_setup_qp()
529 max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ? in rds_ib_setup_qp()
530 rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr; in rds_ib_setup_qp()
531 if (ic->i_send_ring.w_nr != max_wrs) in rds_ib_setup_qp()
532 rds_ib_ring_resize(&ic->i_send_ring, max_wrs); in rds_ib_setup_qp()
534 max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ? in rds_ib_setup_qp()
535 rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr; in rds_ib_setup_qp()
536 if (ic->i_recv_ring.w_nr != max_wrs) in rds_ib_setup_qp()
537 rds_ib_ring_resize(&ic->i_recv_ring, max_wrs); in rds_ib_setup_qp()
539 /* Protection domain and memory range */ in rds_ib_setup_qp()
540 ic->i_pd = rds_ibdev->pd; in rds_ib_setup_qp()
542 ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev); in rds_ib_setup_qp()
543 cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1; in rds_ib_setup_qp()
544 cq_attr.comp_vector = ic->i_scq_vector; in rds_ib_setup_qp()
545 ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send, in rds_ib_setup_qp()
548 if (IS_ERR(ic->i_send_cq)) { in rds_ib_setup_qp()
549 ret = PTR_ERR(ic->i_send_cq); in rds_ib_setup_qp()
550 ic->i_send_cq = NULL; in rds_ib_setup_qp()
551 ibdev_put_vector(rds_ibdev, ic->i_scq_vector); in rds_ib_setup_qp()
556 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); in rds_ib_setup_qp()
557 cq_attr.cqe = ic->i_recv_ring.w_nr; in rds_ib_setup_qp()
558 cq_attr.comp_vector = ic->i_rcq_vector; in rds_ib_setup_qp()
559 ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv, in rds_ib_setup_qp()
562 if (IS_ERR(ic->i_recv_cq)) { in rds_ib_setup_qp()
563 ret = PTR_ERR(ic->i_recv_cq); in rds_ib_setup_qp()
564 ic->i_recv_cq = NULL; in rds_ib_setup_qp()
565 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); in rds_ib_setup_qp()
570 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); in rds_ib_setup_qp()
576 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); in rds_ib_setup_qp()
587 attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1; in rds_ib_setup_qp()
588 attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1; in rds_ib_setup_qp()
589 attr.cap.max_send_sge = rds_ibdev->max_sge; in rds_ib_setup_qp()
593 attr.send_cq = ic->i_send_cq; in rds_ib_setup_qp()
594 attr.recv_cq = ic->i_recv_cq; in rds_ib_setup_qp()
600 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); in rds_ib_setup_qp()
606 ic->i_send_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_send_hdrs_dma, in rds_ib_setup_qp()
607 ic->i_send_ring.w_nr, in rds_ib_setup_qp()
609 if (!ic->i_send_hdrs) { in rds_ib_setup_qp()
610 ret = -ENOMEM; in rds_ib_setup_qp()
615 ic->i_recv_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_recv_hdrs_dma, in rds_ib_setup_qp()
616 ic->i_recv_ring.w_nr, in rds_ib_setup_qp()
618 if (!ic->i_recv_hdrs) { in rds_ib_setup_qp()
619 ret = -ENOMEM; in rds_ib_setup_qp()
624 ic->i_ack = rds_dma_hdr_alloc(rds_ibdev->dev, &ic->i_ack_dma, in rds_ib_setup_qp()
626 if (!ic->i_ack) { in rds_ib_setup_qp()
627 ret = -ENOMEM; in rds_ib_setup_qp()
632 ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work), in rds_ib_setup_qp()
633 ic->i_send_ring.w_nr), in rds_ib_setup_qp()
635 if (!ic->i_sends) { in rds_ib_setup_qp()
636 ret = -ENOMEM; in rds_ib_setup_qp()
641 ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work), in rds_ib_setup_qp()
642 ic->i_recv_ring.w_nr), in rds_ib_setup_qp()
644 if (!ic->i_recvs) { in rds_ib_setup_qp()
645 ret = -ENOMEM; in rds_ib_setup_qp()
652 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, in rds_ib_setup_qp()
653 ic->i_send_cq, ic->i_recv_cq); in rds_ib_setup_qp()
658 vfree(ic->i_sends); in rds_ib_setup_qp()
661 rds_dma_hdr_free(rds_ibdev->dev, ic->i_ack, ic->i_ack_dma, in rds_ib_setup_qp()
663 ic->i_ack = NULL; in rds_ib_setup_qp()
666 rds_dma_hdrs_free(rds_ibdev, ic->i_recv_hdrs, ic->i_recv_hdrs_dma, in rds_ib_setup_qp()
667 ic->i_recv_ring.w_nr, DMA_FROM_DEVICE); in rds_ib_setup_qp()
668 ic->i_recv_hdrs = NULL; in rds_ib_setup_qp()
669 ic->i_recv_hdrs_dma = NULL; in rds_ib_setup_qp()
672 rds_dma_hdrs_free(rds_ibdev, ic->i_send_hdrs, ic->i_send_hdrs_dma, in rds_ib_setup_qp()
673 ic->i_send_ring.w_nr, DMA_TO_DEVICE); in rds_ib_setup_qp()
674 ic->i_send_hdrs = NULL; in rds_ib_setup_qp()
675 ic->i_send_hdrs_dma = NULL; in rds_ib_setup_qp()
678 rdma_destroy_qp(ic->i_cm_id); in rds_ib_setup_qp()
680 ib_destroy_cq(ic->i_recv_cq); in rds_ib_setup_qp()
681 ic->i_recv_cq = NULL; in rds_ib_setup_qp()
683 ib_destroy_cq(ic->i_send_cq); in rds_ib_setup_qp()
684 ic->i_send_cq = NULL; in rds_ib_setup_qp()
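The rds_ib_setup_qp() matches (lines 504-684) cover the usual verbs bring-up order: size the work-request rings against the device limits, create and arm a send and a receive CQ on chosen completion vectors, then fill a struct ib_qp_init_attr pointing at those CQs and call rdma_create_qp() on the connection's cm_id. A minimal sketch of the QP half, assuming the PD and both CQs already exist and using illustrative sizes (the real code also has the full error unwinding shown above):

#include <linux/string.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

static void example_qp_event(struct ib_event *event, void *ctx)
{
	pr_info("QP event %u (%s)\n", event->event, ib_event_msg(event->event));
}

static int example_create_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
			     struct ib_cq *scq, struct ib_cq *rcq,
			     u32 send_wr, u32 recv_wr, u32 max_send_sge)
{
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.event_handler = example_qp_event;
	attr.cap.max_send_wr = send_wr;
	attr.cap.max_recv_wr = recv_wr;
	attr.cap.max_send_sge = max_send_sge;
	attr.cap.max_recv_sge = 1;		/* illustrative */
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;	/* signal only selected sends */
	attr.qp_type = IB_QPT_RC;		/* reliable connected, as RDS/IB uses */
	attr.send_cq = scq;
	attr.recv_cq = rcq;

	ret = rdma_create_qp(cm_id, pd, &attr);
	if (ret)
		pr_err("rdma_create_qp failed: %d\n", ret);
	return ret;
}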
695 const union rds_ib_conn_priv *dp = event->param.conn.private_data; in rds_ib_protocol_compatible()
702 * rdma_cm private data is odd - when there is any private data in the in rds_ib_protocol_compatible()
707 * from an older version. This could be 3.0 or 2.0 - we can't tell. in rds_ib_protocol_compatible()
708 * We really should have changed this for OFED 1.3 :-( in rds_ib_protocol_compatible()
712 if (!event->param.conn.private_data_len) { in rds_ib_protocol_compatible()
713 printk(KERN_NOTICE "RDS incoming connection has no private data, " in rds_ib_protocol_compatible()
720 major = dp->ricp_v6.dp_protocol_major; in rds_ib_protocol_compatible()
721 minor = dp->ricp_v6.dp_protocol_minor; in rds_ib_protocol_compatible()
722 mask = dp->ricp_v6.dp_protocol_minor_mask; in rds_ib_protocol_compatible()
725 major = dp->ricp_v4.dp_protocol_major; in rds_ib_protocol_compatible()
726 minor = dp->ricp_v4.dp_protocol_minor; in rds_ib_protocol_compatible()
727 mask = dp->ricp_v4.dp_protocol_minor_mask; in rds_ib_protocol_compatible()
730 /* Even if len is crap *now* I still want to check it. -ASG */ in rds_ib_protocol_compatible()
731 if (event->param.conn.private_data_len < data_len || major == 0) in rds_ib_protocol_compatible()
745 &dp->ricp_v6.dp_saddr, major, minor); in rds_ib_protocol_compatible()
748 &dp->ricp_v4.dp_saddr, major, minor); in rds_ib_protocol_compatible()
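rds_ib_protocol_compatible() (lines 695-748) reads the peer's advertised major/minor version and a bitmask of acceptable minors from the CM private data, then settles on a version both ends support. A generic sketch of that kind of minor-version negotiation via a shared bitmask; the constants and policy here are illustrative, not the RDS ones:

#include <linux/types.h>
#include <linux/bitops.h>

#define EXAMPLE_LOCAL_MINOR_MASK 0x07	/* we support minors 0, 1 and 2 */

/* Returns the highest minor both sides accept, or -1 if there is none. */
static int example_pick_minor(u16 peer_minor_mask)
{
	u16 common = peer_minor_mask & EXAMPLE_LOCAL_MINOR_MASK;

	if (!common)
		return -1;
	return fls(common) - 1;		/* highest set bit == highest minor */
}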
772 idx = dev->ifindex; in __rds_find_ifindex()
785 __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; in rds_ib_cm_handle_connect()
786 __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; in rds_ib_cm_handle_connect()
809 dp = event->param.conn.private_data; in rds_ib_cm_handle_connect()
812 dp_cmn = &dp->ricp_v6.dp_cmn; in rds_ib_cm_handle_connect()
813 saddr6 = &dp->ricp_v6.dp_saddr; in rds_ib_cm_handle_connect()
814 daddr6 = &dp->ricp_v6.dp_daddr; in rds_ib_cm_handle_connect()
822 /* No index found... Need to bail out. */ in rds_ib_cm_handle_connect()
824 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
830 /* No index found... Need to bail out. */ in rds_ib_cm_handle_connect()
832 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
837 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
841 dp_cmn = &dp->ricp_v4.dp_cmn; in rds_ib_cm_handle_connect()
842 ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr); in rds_ib_cm_handle_connect()
843 ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr); in rds_ib_cm_handle_connect()
852 (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss); in rds_ib_cm_handle_connect()
856 &rds_ib_transport, dp_cmn->ricpc_dp_toss, in rds_ib_cm_handle_connect()
868 * by both hosts, we have a random backoff mechanism - in rds_ib_cm_handle_connect()
871 mutex_lock(&conn->c_cm_lock); in rds_ib_cm_handle_connect()
879 /* Wait and see - our connect may still be succeeding */ in rds_ib_cm_handle_connect()
885 ic = conn->c_transport_data; in rds_ib_cm_handle_connect()
888 rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit)); in rds_ib_cm_handle_connect()
892 if (dp_cmn->ricpc_ack_seq) in rds_ib_cm_handle_connect()
893 rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq), in rds_ib_cm_handle_connect()
896 BUG_ON(cm_id->context); in rds_ib_cm_handle_connect()
897 BUG_ON(ic->i_cm_id); in rds_ib_cm_handle_connect()
899 ic->i_cm_id = cm_id; in rds_ib_cm_handle_connect()
900 cm_id->context = conn; in rds_ib_cm_handle_connect()
913 event->param.conn.responder_resources, in rds_ib_cm_handle_connect()
914 event->param.conn.initiator_depth, isv6); in rds_ib_cm_handle_connect()
923 mutex_unlock(&conn->c_cm_lock); in rds_ib_cm_handle_connect()
933 struct rds_connection *conn = cm_id->context; in rds_ib_cm_initiate_connect()
934 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_initiate_connect()
942 ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */ in rds_ib_cm_initiate_connect()
951 conn->c_proposed_version, in rds_ib_cm_initiate_connect()
959 /* Beware - returning non-zero tells the rdma_cm to destroy in rds_ib_cm_initiate_connect()
963 if (ic->i_cm_id == cm_id) in rds_ib_cm_initiate_connect()
966 ic->i_active_side = true; in rds_ib_cm_initiate_connect()
972 struct rds_connection *conn = cp->cp_conn; in rds_ib_conn_path_connect()
978 ic = conn->c_transport_data; in rds_ib_conn_path_connect()
983 if (conn->c_isv6) in rds_ib_conn_path_connect()
988 ic->i_cm_id = rdma_create_id(&init_net, handler, conn, in rds_ib_conn_path_connect()
990 if (IS_ERR(ic->i_cm_id)) { in rds_ib_conn_path_connect()
991 ret = PTR_ERR(ic->i_cm_id); in rds_ib_conn_path_connect()
992 ic->i_cm_id = NULL; in rds_ib_conn_path_connect()
997 rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn); in rds_ib_conn_path_connect()
999 if (ipv6_addr_v4mapped(&conn->c_faddr)) { in rds_ib_conn_path_connect()
1003 sin->sin_family = AF_INET; in rds_ib_conn_path_connect()
1004 sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; in rds_ib_conn_path_connect()
1005 sin->sin_port = 0; in rds_ib_conn_path_connect()
1008 sin->sin_family = AF_INET; in rds_ib_conn_path_connect()
1009 sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; in rds_ib_conn_path_connect()
1010 sin->sin_port = htons(RDS_PORT); in rds_ib_conn_path_connect()
1015 sin6->sin6_family = AF_INET6; in rds_ib_conn_path_connect()
1016 sin6->sin6_addr = conn->c_laddr; in rds_ib_conn_path_connect()
1017 sin6->sin6_port = 0; in rds_ib_conn_path_connect()
1018 sin6->sin6_scope_id = conn->c_dev_if; in rds_ib_conn_path_connect()
1021 sin6->sin6_family = AF_INET6; in rds_ib_conn_path_connect()
1022 sin6->sin6_addr = conn->c_faddr; in rds_ib_conn_path_connect()
1023 sin6->sin6_port = htons(RDS_CM_PORT); in rds_ib_conn_path_connect()
1024 sin6->sin6_scope_id = conn->c_dev_if; in rds_ib_conn_path_connect()
1027 ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, in rds_ib_conn_path_connect()
1031 rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id, in rds_ib_conn_path_connect()
1033 rdma_destroy_id(ic->i_cm_id); in rds_ib_conn_path_connect()
1034 ic->i_cm_id = NULL; in rds_ib_conn_path_connect()
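rds_ib_conn_path_connect() (lines 972-1034) builds the active side: create an rdma_cm_id, fill source and destination sockaddrs (IPv4 when the peer address is IPv4-mapped, IPv6 otherwise), and kick off asynchronous address resolution with rdma_resolve_addr(). A minimal IPv4-only sketch, with a hypothetical event handler and port number:

#include <linux/err.h>
#include <linux/in.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

#define EXAMPLE_PORT 18634	/* hypothetical port, not the RDS one */

static int example_start_connect(rdma_cm_event_handler handler, void *ctx,
				 __be32 laddr, __be32 faddr)
{
	struct sockaddr_in src = {}, dst = {};
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = laddr;
	src.sin_port = 0;				/* any local port */

	dst.sin_family = AF_INET;
	dst.sin_addr.s_addr = faddr;
	dst.sin_port = htons(EXAMPLE_PORT);

	/* Completion (or failure) is reported asynchronously to handler(). */
	ret = rdma_resolve_addr(cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dst, 5000 /* ms */);
	if (ret)
		rdma_destroy_id(cm_id);
	return ret;
}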
1048 struct rds_connection *conn = cp->cp_conn; in rds_ib_conn_path_shutdown()
1049 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_conn_path_shutdown()
1052 rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, in rds_ib_conn_path_shutdown()
1053 ic->i_pd, ic->i_send_cq, ic->i_recv_cq, in rds_ib_conn_path_shutdown()
1054 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_ib_conn_path_shutdown()
1056 if (ic->i_cm_id) { in rds_ib_conn_path_shutdown()
1057 rdsdebug("disconnecting cm %p\n", ic->i_cm_id); in rds_ib_conn_path_shutdown()
1058 err = rdma_disconnect(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1064 ic->i_cm_id, err); in rds_ib_conn_path_shutdown()
1078 * sends to complete we're ensured that there will be no in rds_ib_conn_path_shutdown()
1082 rds_ib_ring_empty(&ic->i_recv_ring) && in rds_ib_conn_path_shutdown()
1083 (atomic_read(&ic->i_signaled_sends) == 0) && in rds_ib_conn_path_shutdown()
1084 (atomic_read(&ic->i_fastreg_inuse_count) == 0) && in rds_ib_conn_path_shutdown()
1085 (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR)); in rds_ib_conn_path_shutdown()
1086 tasklet_kill(&ic->i_send_tasklet); in rds_ib_conn_path_shutdown()
1087 tasklet_kill(&ic->i_recv_tasklet); in rds_ib_conn_path_shutdown()
1089 atomic_set(&ic->i_cq_quiesce, 1); in rds_ib_conn_path_shutdown()
1092 if (ic->i_cm_id->qp) in rds_ib_conn_path_shutdown()
1093 rdma_destroy_qp(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1094 if (ic->i_send_cq) { in rds_ib_conn_path_shutdown()
1095 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1096 ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector); in rds_ib_conn_path_shutdown()
1097 ib_destroy_cq(ic->i_send_cq); in rds_ib_conn_path_shutdown()
1100 if (ic->i_recv_cq) { in rds_ib_conn_path_shutdown()
1101 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1102 ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector); in rds_ib_conn_path_shutdown()
1103 ib_destroy_cq(ic->i_recv_cq); in rds_ib_conn_path_shutdown()
1106 if (ic->rds_ibdev) { in rds_ib_conn_path_shutdown()
1108 if (ic->i_send_hdrs) { in rds_ib_conn_path_shutdown()
1109 rds_dma_hdrs_free(ic->rds_ibdev, in rds_ib_conn_path_shutdown()
1110 ic->i_send_hdrs, in rds_ib_conn_path_shutdown()
1111 ic->i_send_hdrs_dma, in rds_ib_conn_path_shutdown()
1112 ic->i_send_ring.w_nr, in rds_ib_conn_path_shutdown()
1114 ic->i_send_hdrs = NULL; in rds_ib_conn_path_shutdown()
1115 ic->i_send_hdrs_dma = NULL; in rds_ib_conn_path_shutdown()
1118 if (ic->i_recv_hdrs) { in rds_ib_conn_path_shutdown()
1119 rds_dma_hdrs_free(ic->rds_ibdev, in rds_ib_conn_path_shutdown()
1120 ic->i_recv_hdrs, in rds_ib_conn_path_shutdown()
1121 ic->i_recv_hdrs_dma, in rds_ib_conn_path_shutdown()
1122 ic->i_recv_ring.w_nr, in rds_ib_conn_path_shutdown()
1124 ic->i_recv_hdrs = NULL; in rds_ib_conn_path_shutdown()
1125 ic->i_recv_hdrs_dma = NULL; in rds_ib_conn_path_shutdown()
1128 if (ic->i_ack) { in rds_ib_conn_path_shutdown()
1129 rds_dma_hdr_free(ic->rds_ibdev->dev, ic->i_ack, in rds_ib_conn_path_shutdown()
1130 ic->i_ack_dma, DMA_TO_DEVICE); in rds_ib_conn_path_shutdown()
1131 ic->i_ack = NULL; in rds_ib_conn_path_shutdown()
1134 WARN_ON(ic->i_send_hdrs); in rds_ib_conn_path_shutdown()
1135 WARN_ON(ic->i_send_hdrs_dma); in rds_ib_conn_path_shutdown()
1136 WARN_ON(ic->i_recv_hdrs); in rds_ib_conn_path_shutdown()
1137 WARN_ON(ic->i_recv_hdrs_dma); in rds_ib_conn_path_shutdown()
1138 WARN_ON(ic->i_ack); in rds_ib_conn_path_shutdown()
1141 if (ic->i_sends) in rds_ib_conn_path_shutdown()
1143 if (ic->i_recvs) in rds_ib_conn_path_shutdown()
1146 rdma_destroy_id(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1151 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1152 rds_ib_remove_conn(ic->rds_ibdev, conn); in rds_ib_conn_path_shutdown()
1154 ic->i_cm_id = NULL; in rds_ib_conn_path_shutdown()
1155 ic->i_pd = NULL; in rds_ib_conn_path_shutdown()
1156 ic->i_send_cq = NULL; in rds_ib_conn_path_shutdown()
1157 ic->i_recv_cq = NULL; in rds_ib_conn_path_shutdown()
1159 BUG_ON(ic->rds_ibdev); in rds_ib_conn_path_shutdown()
1162 if (ic->i_data_op) { in rds_ib_conn_path_shutdown()
1165 rm = container_of(ic->i_data_op, struct rds_message, data); in rds_ib_conn_path_shutdown()
1167 ic->i_data_op = NULL; in rds_ib_conn_path_shutdown()
1171 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_conn_path_shutdown()
1173 atomic64_set(&ic->i_ack_next, 0); in rds_ib_conn_path_shutdown()
1175 ic->i_ack_next = 0; in rds_ib_conn_path_shutdown()
1177 ic->i_ack_recv = 0; in rds_ib_conn_path_shutdown()
1180 ic->i_flowctl = 0; in rds_ib_conn_path_shutdown()
1181 atomic_set(&ic->i_credits, 0); in rds_ib_conn_path_shutdown()
1183 /* Re-init rings, but retain sizes. */ in rds_ib_conn_path_shutdown()
1184 rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr); in rds_ib_conn_path_shutdown()
1185 rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr); in rds_ib_conn_path_shutdown()
1187 if (ic->i_ibinc) { in rds_ib_conn_path_shutdown()
1188 rds_inc_put(&ic->i_ibinc->ii_inc); in rds_ib_conn_path_shutdown()
1189 ic->i_ibinc = NULL; in rds_ib_conn_path_shutdown()
1192 vfree(ic->i_sends); in rds_ib_conn_path_shutdown()
1193 ic->i_sends = NULL; in rds_ib_conn_path_shutdown()
1194 vfree(ic->i_recvs); in rds_ib_conn_path_shutdown()
1195 ic->i_recvs = NULL; in rds_ib_conn_path_shutdown()
1196 ic->i_active_side = false; in rds_ib_conn_path_shutdown()
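The shutdown matches (lines 1048-1196) follow a strict quiesce order: disconnect, wait_event() until the rings and in-flight counters drain, kill the polling tasklets, mark the CQs quiesced, and only then destroy the QP, CQs, DMA headers and cm_id before re-initialising the per-connection state. A compressed sketch of that ordering with hypothetical state names, covering only the quiesce step:

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

/* Hypothetical per-connection state used only for this sketch. */
struct example_conn {
	wait_queue_head_t	drain_waitq;
	atomic_t		inflight_sends;
	atomic_t		cq_quiesce;
	struct tasklet_struct	send_tasklet;
	struct tasklet_struct	recv_tasklet;
};

static void example_quiesce(struct example_conn *c)
{
	/* 1. Wait until completion handlers have retired every posted WR. */
	wait_event(c->drain_waitq, atomic_read(&c->inflight_sends) == 0);

	/* 2. Stop the bottom halves so nothing polls the CQs any more. */
	tasklet_kill(&c->send_tasklet);
	tasklet_kill(&c->recv_tasklet);

	/* 3. Tell any late interrupt that polling is over. */
	atomic_set(&c->cq_quiesce, 1);

	/* Only now is it safe to destroy the QP and CQs. */
}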
1208 return -ENOMEM; in rds_ib_conn_alloc()
1216 INIT_LIST_HEAD(&ic->ib_node); in rds_ib_conn_alloc()
1217 tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send, in rds_ib_conn_alloc()
1219 tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv, in rds_ib_conn_alloc()
1221 mutex_init(&ic->i_recv_mutex); in rds_ib_conn_alloc()
1223 spin_lock_init(&ic->i_ack_lock); in rds_ib_conn_alloc()
1225 atomic_set(&ic->i_signaled_sends, 0); in rds_ib_conn_alloc()
1226 atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR); in rds_ib_conn_alloc()
1232 rds_ib_ring_init(&ic->i_send_ring, 0); in rds_ib_conn_alloc()
1233 rds_ib_ring_init(&ic->i_recv_ring, 0); in rds_ib_conn_alloc()
1235 ic->conn = conn; in rds_ib_conn_alloc()
1236 conn->c_transport_data = ic; in rds_ib_conn_alloc()
1239 list_add_tail(&ic->ib_node, &ib_nodev_conns); in rds_ib_conn_alloc()
1243 rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data); in rds_ib_conn_alloc()
1262 lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; in rds_ib_conn_free()
1265 list_del(&ic->ib_node); in rds_ib_conn_free()