/linux/drivers/infiniband/core/

iwcm.c
    97  struct iwcm_id_private *cm_id;  [member]
   156  list_add(&work->free_list, &work->cm_id->work_free_list);  [in put_work()]
   180  work->cm_id = cm_id_priv;  [in alloc_work_entries()]
   224  static void add_ref(struct iw_cm_id *cm_id)  [in add_ref(), argument]
   227  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  [in add_ref()]
   231  static void rem_ref(struct iw_cm_id *cm_id)  [in rem_ref(), argument]
   235  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  [in rem_ref()]
   240  static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
   307  int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)  [in iw_cm_disconnect(), argument]
   314  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  [in iw_cm_disconnect()]
   [all …]
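
The add_ref()/rem_ref()/iw_cm_disconnect() hits above all recover the private
state from the public handle with the same container_of() idiom. A minimal,
self-contained sketch of that recovery (struct layout simplified; the
refcount field and the to_iwcm_priv() name are illustrative, not from iwcm.c):

	#include <stddef.h>

	/* Simplified stand-ins for the kernel types. */
	struct iw_cm_id { int state; };

	struct iwcm_id_private {
		int refcount;		/* assumed field, for illustration */
		struct iw_cm_id id;	/* public handle embedded in private state */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Same recovery step as add_ref()/rem_ref() in iwcm.c. */
	static struct iwcm_id_private *to_iwcm_priv(struct iw_cm_id *cm_id)
	{
		return container_of(cm_id, struct iwcm_id_private, id);
	}

Because the public id is embedded rather than pointed to, the conversion is
pure pointer arithmetic with no extra allocation or lookup.
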
cm_trace.h
   139  const struct ib_cm_id *cm_id
   142  TP_ARGS(cm_id),
   145  __field(const void *, cm_id) /* for eBPF scripts */
   153  __entry->cm_id = cm_id;
   154  __entry->local_id = be32_to_cpu(cm_id->local_id);
   155  __entry->remote_id = be32_to_cpu(cm_id->remote_id);
   156  __entry->state = cm_id->state;
   157  __entry->lap_state = cm_id->lap_state;
   171  const struct ib_cm_id *cm_id \
   173  TP_ARGS(cm_id))
   [all …]
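
These hits come from a tracepoint class that records both the raw cm_id
pointer (so eBPF scripts can correlate events on the same connection) and the
decoded protocol state. A hypothetical TRACE_EVENT in the same style — only
the event name is invented; the field assignments mirror the hits:

	TRACE_EVENT(cm_example,
		TP_PROTO(const struct ib_cm_id *cm_id),
		TP_ARGS(cm_id),
		TP_STRUCT__entry(
			__field(const void *, cm_id)	/* for eBPF scripts */
			__field(u32, local_id)
			__field(u32, remote_id)
		),
		TP_fast_assign(
			__entry->cm_id = cm_id;
			__entry->local_id = be32_to_cpu(cm_id->local_id);
			__entry->remote_id = be32_to_cpu(cm_id->remote_id);
		),
		TP_printk("cm_id=%p local_id=%u remote_id=%u",
			  __entry->cm_id, __entry->local_id,
			  __entry->remote_id)
	);
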
ucma.c
    94  struct rdma_cm_id *cm_id;  [member]
   170  if (!ctx->cm_id->device) {  [in ucma_get_ctx_dev()]
   188  rdma_destroy_id(ctx->cm_id);  [in ucma_close_id()]
   191  ctx->cm_id = NULL;  [in ucma_close_id()]
   218  struct rdma_cm_id *cm_id)  [in ucma_set_ctx_cm_id(), argument]
   221  ctx->cm_id = cm_id;  [in ucma_set_ctx_cm_id()]
   289  if (ctx->cm_id->qp_type == IB_QPT_UD)  [in ucma_create_uevent()]
   290  ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,  [in ucma_create_uevent()]
   302  static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,  [in ucma_connect_event_handler(), argument]
   305  struct ucma_context *listen_ctx = cm_id->context;  [in ucma_connect_event_handler()]
   [all …]

cma_trace.h
    29  __field(u32, cm_id)
    36  __entry->cm_id = id_priv->res.id;
    45  __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos
    75  __field(u32, cm_id)
    82  __entry->cm_id = id_priv->res.id;
    91  __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
   104  __field(u32, cm_id)
   112  __entry->cm_id = id_priv->res.id;
   122  __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
   182  __field(u32, cm_id)
   [all …]

cm.c
  1045  static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,  [in cm_destroy_id_wait_timeout(), argument]
  1050  cm_id_priv = container_of(cm_id, struct cm_id_private, id);  [in cm_destroy_id_wait_timeout()]
  1052  cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));  [in cm_destroy_id_wait_timeout()]
  1055  static void cm_destroy_id(struct ib_cm_id *cm_id, int err)  [in cm_destroy_id(), argument]
  1063  cm_id_priv = container_of(cm_id, struct cm_id_private, id);  [in cm_destroy_id()]
  1065  old_state = cm_id->state;  [in cm_destroy_id()]
  1067  switch (cm_id->state) {  [in cm_destroy_id()]
  1078  cm_id->state = IB_CM_IDLE;  [in cm_destroy_id()]
  1084  cm_id->state = IB_CM_IDLE;  [in cm_destroy_id()]
  1092  cm_id->state = IB_CM_IDLE;  [in cm_destroy_id()]
  [all …]
cma.c
   134  * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
   143  return id_priv->cm_id.iw;  [in rdma_iw_cm_id()]
   768  * @id_priv: cm_id which should be bound to cma device
   772  * It is applicable to active and passive side cm_id.
  1294  if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))  [in rdma_init_qp_attr()]
  1297  ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,  [in rdma_init_qp_attr()]
  1303  if (!id_priv->cm_id.iw) {  [in rdma_init_qp_attr()]
  1307  ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,  [in rdma_init_qp_attr()]
  1808  const struct ib_cm_id *cm_id,  [in cma_find_listener(), argument]
  1822  if (id_priv->id.device == cm_id  [in cma_find_listener()]
  1840  cma_ib_id_from_event(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event, struct cma_req_info *req, struct net_device **net_dev)  [cma_ib_id_from_event(), argument]
  2179  cma_ib_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event)  [cma_ib_handler(), argument]
  2401  cma_ib_req_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event)  [cma_ib_req_handler(), argument]
  2489  rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, union ib_gid *dgid)  [rdma_read_gids(), argument]
  2580  iw_conn_req_handler(struct iw_cm_id *cm_id, struct iw_cm_event *iw_event)  [iw_conn_req_handler(), argument]
  4216  cma_sidr_rep_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event)  [cma_sidr_rep_handler(), argument]
  4420  struct iw_cm_id *cm_id;  [in cma_connect_iw(), local]
  [all …]
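
The rdma_init_qp_attr() hits show why cma.c keeps cm_id as a union: the same
rdma_cm_id is backed by an ib_cm_id on InfiniBand and an iw_cm_id on iWARP,
and the matching per-transport helper fills in the QP attributes. A condensed
sketch of that dispatch (error paths trimmed; the rdma_cap_*() transport
tests and the UD shortcut are assumptions about the surrounding code, not
taken from the hits):

	static int init_qp_attr_sketch(struct rdma_id_private *id_priv,
				       struct ib_qp_attr *qp_attr,
				       int *qp_attr_mask)
	{
		struct rdma_cm_id *id = &id_priv->id;

		if (rdma_cap_ib_cm(id->device, id->port_num)) {
			if (!id_priv->cm_id.ib || id->qp_type == IB_QPT_UD)
				return -EINVAL;	/* UD path elided here */
			return ib_cm_init_qp_attr(id_priv->cm_id.ib,
						  qp_attr, qp_attr_mask);
		}
		if (rdma_cap_iw_cm(id->device, id->port_num)) {
			if (!id_priv->cm_id.iw)
				return -EINVAL;	/* no iWARP cm_id bound yet */
			return iw_cm_init_qp_attr(id_priv->cm_id.iw,
						  qp_attr, qp_attr_mask);
		}
		return -ENOSYS;
	}
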
/linux/net/rds/

rdma_transport.c
    49  static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,  [in rds_rdma_cm_event_handler_cmn(), argument]
    54  struct rds_connection *conn = cm_id->context;  [in rds_rdma_cm_event_handler_cmn()]
    60  rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,  [in rds_rdma_cm_event_handler_cmn()]
    63  if (cm_id->device->node_type == RDMA_NODE_IB_CA)  [in rds_rdma_cm_event_handler_cmn()]
    85  ret = trans->cm_handle_connect(cm_id, event, isv6);  [in rds_rdma_cm_event_handler_cmn()]
    90  rdma_set_service_type(cm_id, conn->c_tos);  [in rds_rdma_cm_event_handler_cmn()]
    91  rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);  [in rds_rdma_cm_event_handler_cmn()]
    93  ret = rdma_resolve_route(cm_id,  [in rds_rdma_cm_event_handler_cmn()]
   106  if (ibic && ibic->i_cm_id == cm_id) {  [in rds_rdma_cm_event_handler_cmn()]
   107  cm_id->route.path_rec[0].sl =  [in rds_rdma_cm_event_handler_cmn()]
   [all …]

ib.c
   410  struct rdma_cm_id *cm_id;  [in rds_ib_laddr_check(), local]
   422  cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,  [in rds_ib_laddr_check()]
   424  if (IS_ERR(cm_id))  [in rds_ib_laddr_check()]
   425  return PTR_ERR(cm_id);  [in rds_ib_laddr_check()]
   475  ret = rdma_bind_addr(cm_id, sa);  [in rds_ib_laddr_check()]
   478  if (ret || !cm_id->device ||  [in rds_ib_laddr_check()]
   479  cm_id->device->node_type != RDMA_NODE_IB_CA)  [in rds_ib_laddr_check()]
   484  cm_id->device ? cm_id->device->node_type : -1);  [in rds_ib_laddr_check()]
   487  rdma_destroy_id(cm_id);  [in rds_ib_laddr_check()]
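
The ib.c hits are a complete create/bind/inspect/destroy lifecycle:
rds_ib_laddr_check() probes whether a local address belongs to an IB device
by binding a throwaway cm_id and looking at what the bind resolved to. A
sketch of that pattern (the function name, port-space/QP-type constants, and
errno choice are illustrative; the calls mirror the hits):

	static int laddr_check_sketch(struct sockaddr *sa)
	{
		struct rdma_cm_id *cm_id;
		int ret;

		cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
				       NULL, RDMA_PS_TCP, IB_QPT_RC);
		if (IS_ERR(cm_id))
			return PTR_ERR(cm_id);

		/* Binding resolves sa to a local RDMA device, if any. */
		ret = rdma_bind_addr(cm_id, sa);
		if (!ret && (!cm_id->device ||
			     cm_id->device->node_type != RDMA_NODE_IB_CA))
			ret = -EADDRNOTAVAIL;	/* bound, but not to an IB port */

		rdma_destroy_id(cm_id);		/* probe only; always tear down */
		return ret;
	}
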
rdma_transport.h
    20  int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
    22  int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,

ib_cm.c
   782  int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,  [in rds_ib_cm_handle_connect(), argument]
   785  __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;  [in rds_ib_cm_handle_connect()]
   786  __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;  [in rds_ib_cm_handle_connect()]
   896  BUG_ON(cm_id->context);  [in rds_ib_cm_handle_connect()]
   899  ic->i_cm_id = cm_id;  [in rds_ib_cm_handle_connect()]
   900  cm_id->context = conn;  [in rds_ib_cm_handle_connect()]
   916  rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);  [in rds_ib_cm_handle_connect()]
   918  if (rdma_accept(cm_id, &conn_param))  [in rds_ib_cm_handle_connect()]
   925  rdma_reject(cm_id, &err, sizeof(int),  [in rds_ib_cm_handle_connect()]
   931  int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)  [in rds_ib_cm_initiate_connect(), argument]
   [all …]
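
The ib_cm.c hits trace the passive-side accept: adopt the incoming cm_id into
the connection, set its context, tune RNR retry behaviour, then accept — or
reject with an errno carried back as private data. A sketch of that tail
(conn_param setup elided; the wrapper name and errno value are illustrative,
the calls mirror the hits):

	static int handle_connect_tail(struct rdma_cm_id *cm_id,
				       struct rds_connection *conn,
				       struct rds_ib_connection *ic,
				       struct rdma_conn_param *conn_param)
	{
		int err;

		BUG_ON(cm_id->context);	/* a fresh request cm_id has no conn */
		ic->i_cm_id = cm_id;	/* the connection now owns this cm_id */
		cm_id->context = conn;

		/* Ask for the shortest receiver-not-ready retry delay. */
		rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);

		if (rdma_accept(cm_id, conn_param)) {
			err = -ECONNREFUSED;	/* illustrative errno */
			rdma_reject(cm_id, &err, sizeof(int),
				    IB_CM_REJ_CONSUMER_DEFINED);
			return err;
		}
		return 0;
	}
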
/linux/drivers/infiniband/hw/qedr/

qedr_iw_cm.c
    97  if (ep->cm_id)  [in qedr_iw_free_ep()]
    98  ep->cm_id->rem_ref(ep->cm_id);  [in qedr_iw_free_ep()]
   135  listener->cm_id->event_handler(listener->cm_id, &event);  [in qedr_iw_mpa_request()]
   166  if (ep->cm_id)  [in qedr_iw_issue_event()]
   167  ep->cm_id->event_handler(ep->cm_id, &event);  [in qedr_iw_issue_event()]
   175  if (ep->cm_id)  [in qedr_iw_close_event()]
   239  if (ep->cm_id)  [in qedr_iw_disconnect_worker()]
   240  ep->cm_id->event_handler(ep->cm_id, &event);  [in qedr_iw_disconnect_worker()]
   532  int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)  [in qedr_iw_connect(), argument]
   534  struct qedr_dev *dev = get_qedr_dev(cm_id->device);  [in qedr_iw_connect()]
   [all …]
qedr_iw_cm.h
    34  int qedr_iw_connect(struct iw_cm_id *cm_id,
    37  int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
    39  int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
    41  int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
    43  int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
/linux/include/rdma/

iw_cm.h
    42  typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
    53  typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
   115  void iw_destroy_cm_id(struct iw_cm_id *cm_id);
   128  int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
   144  int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
   158  int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data,
   173  int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
   185  int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
   196  int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
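
These declarations are the whole consumer-facing iWARP CM API: a handler
callback type plus listen/accept/reject/connect/disconnect entry points. A
sketch of a consumer using it — the handler body, function names, and
ord/ird/qpn values are illustrative, not from any driver:

	static int my_iw_handler(struct iw_cm_id *cm_id,
				 struct iw_cm_event *event)
	{
		switch (event->event) {
		case IW_CM_EVENT_CONNECT_REPLY:
			/* event->status == 0 means the peer accepted */
			break;
		case IW_CM_EVENT_ESTABLISHED:
		case IW_CM_EVENT_DISCONNECT:
		case IW_CM_EVENT_CLOSE:
			break;
		default:
			break;
		}
		return 0;
	}

	static int my_connect(struct iw_cm_id *cm_id, u32 qpn)
	{
		struct iw_cm_conn_param param = {
			.ord = 16,	/* illustrative RDMA-read depths */
			.ird = 16,
			.qpn = qpn,	/* QP to bind to this connection */
		};

		return iw_cm_connect(cm_id, &param);
	}
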
ib_cm.h
   289  typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
   325  void ib_destroy_cm_id(struct ib_cm_id *cm_id);
   343  int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id);
   380  int ib_send_cm_req(struct ib_cm_id *cm_id,
   405  int ib_send_cm_rep(struct ib_cm_id *cm_id,
   416  int ib_send_cm_rtu(struct ib_cm_id *cm_id,
   429  int ib_send_cm_dreq(struct ib_cm_id *cm_id,
   444  int ib_send_cm_drep(struct ib_cm_id *cm_id,
   462  int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
   476  int ib_send_cm_rej(struct ib_cm_id *cm_id,
   [all …]
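
The ib_send_cm_* declarations map one-to-one onto the IB CM handshake
messages (REQ, REP, RTU) and teardown (DREQ, DREP, REJ). A sketch of the
passive-side setup implied by these declarations — create a cm_id bound to a
handler, then listen on a service ID; the handler, function names, and
service ID value are illustrative:

	static int my_ib_handler(struct ib_cm_id *cm_id,
				 const struct ib_cm_event *event)
	{
		/* IB_CM_REQ_RECEIVED would be answered with
		 * ib_send_cm_rep(); the active side then completes the
		 * handshake with ib_send_cm_rtu(). */
		return 0;
	}

	static struct ib_cm_id *my_listen(struct ib_device *device)
	{
		struct ib_cm_id *cm_id;
		int ret;

		cm_id = ib_create_cm_id(device, my_ib_handler, NULL);
		if (IS_ERR(cm_id))
			return cm_id;

		ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234ULL)); /* example ID */
		if (ret) {
			ib_destroy_cm_id(cm_id);
			return ERR_PTR(ret);
		}
		return cm_id;
	}
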
/linux/drivers/nvme/target/

rdma.c
    90  struct rdma_cm_id *cm_id;  [member]
   123  struct rdma_cm_id *cm_id;  [member]
   629  struct rdma_cm_id *cm_id = rsp->queue->cm_id;  [in nvmet_rdma_rw_ctx_init(), local]
   634  ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,  [in nvmet_rdma_rw_ctx_init()]
   635  cm_id->port_num, req->sg, req->sg_cnt,  [in nvmet_rdma_rw_ctx_init()]
   639  ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,  [in nvmet_rdma_rw_ctx_init()]
   648  struct rdma_cm_id *cm_id = rsp->queue->cm_id;  [in nvmet_rdma_rw_ctx_destroy(), local]
   652  rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,  [in nvmet_rdma_rw_ctx_destroy()]
   653  cm_id->port_num, req->sg, req->sg_cnt,  [in nvmet_rdma_rw_ctx_destroy()]
   657  rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,  [in nvmet_rdma_rw_ctx_destroy()]
   [all …]
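
Here the cm_id is the source of both the QP and the port number that the
rdma_rw API needs, and every successful init has a matching destroy with the
same arguments. A sketch of that symmetry for the non-signature path
(remote_addr/rkey come from the command's keyed SGL; those names and the
status code are assumptions, and the full signatures live in
include/rdma/rw.h):

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			       req->sg, req->sg_cnt, 0 /* sg_offset */,
			       remote_addr, rkey, dir);
	if (ret < 0)
		return NVME_SC_INTERNAL;

	/* ... issue the RDMA READ/WRITE; then, on completion: */
	rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
			    req->sg, req->sg_cnt, dir);
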
/linux/net/9p/

trans_rdma.c
    72  struct rdma_cm_id *cm_id;  [member]
   168  rdma_disconnect(rdma->cm_id);  [in p9_cm_event_handler()]
   189  ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,  [in recv_done()]
   236  ib_dma_unmap_single(rdma->cm_id->device,  [in send_done()]
   264  if (rdma->cm_id && !IS_ERR(rdma->cm_id))  [in rdma_destroy_trans()]
   265  rdma_destroy_id(rdma->cm_id);  [in rdma_destroy_trans()]
   278  c->busa = ib_dma_map_single(rdma->cm_id->device,  [in post_recv()]
   281  if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))  [in post_recv()]
   297  ib_dma_unmap_single(rdma->cm_id->device, c->busa,  [in post_recv()]
   373  c->busa = ib_dma_map_single(rdma->cm_id->device,  [in rdma_request()]
   [all …]
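
The 9p transport reaches its DMA device exclusively through cm_id->device,
and the hits show the full mapping discipline: map, check with
ib_dma_mapping_error(), and unmap in the completion handler. A condensed
sketch (context and buffer field names follow the hits; the exact buffer
member and errno are assumptions):

	/* In post_recv(): map the receive buffer via the cm_id's device. */
	c->busa = ib_dma_map_single(rdma->cm_id->device, c->rc.sdata,
				    client->msize, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		return -EIO;

	/* ... post the receive; later, in recv_done(): */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);
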
/linux/drivers/infiniband/hw/irdma/

cm.c
   137  struct iw_cm_id *cm_id,  [in irdma_get_cmevent_info(), argument]
   140  memcpy(&event->local_addr, &cm_id->m_local_addr,  [in irdma_get_cmevent_info()]
   142  memcpy(&event->remote_addr, &cm_id->m_remote_addr,  [in irdma_get_cmevent_info()]
   160  struct iw_cm_id *cm_id,  [in irdma_send_cm_event(), argument]
   167  trace_irdma_send_cm_event(cm_node, cm_id, type, status,  [in irdma_send_cm_event()]
   172  cm_node, cm_id, cm_node->accelerated, cm_node->state, type,  [in irdma_send_cm_event()]
   187  irdma_get_cmevent_info(cm_node, cm_id, &event);  [in irdma_send_cm_event()]
   203  return cm_id->event_handler(cm_id, &event);  [in irdma_send_cm_event()]
   234  if (!cm_node->cm_id)  [in irdma_create_event()]
   250  event->cm_info.cm_id = cm_node->cm_id;  [in irdma_create_event()]
   [all …]
cm.h
   245  struct iw_cm_id *cm_id;  [member]
   296  struct iw_cm_id *cm_id;  [member]
   330  struct iw_cm_id *cm_id;  [member]
   394  int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
   395  int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
   396  int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
   397  int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
   398  int irdma_destroy_listen(struct iw_cm_id *cm_id);

trace_cm.h
   201  TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
   203  TP_ARGS(cm_node, cm_id, type, status, caller),
   206  __field(struct iw_cm_id *, cm_id)
   222  __entry->cm_id = cm_id;
   241  __entry->cm_id,
   257  TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
   259  TP_ARGS(cm_id, type, status, caller),
   260  TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
   265  TP_fast_assign(__entry->cm_id = cm_id;
   271  __entry->cm_id,
/linux/drivers/infiniband/hw/cxgb4/

cm.c
   152  epc->cm_id->rem_ref(epc->cm_id);  [in deref_cm_id()]
   153  epc->cm_id = NULL;  [in deref_cm_id()]
   160  epc->cm_id->add_ref(epc->cm_id);  [in ref_cm_id()]
  1277  if (ep->com.cm_id) {  [in close_complete_upcall()]
  1279  ep, ep->com.cm_id, ep->hwtid);  [in close_complete_upcall()]
  1280  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  [in close_complete_upcall()]
  1293  if (ep->com.cm_id) {  [in peer_close_upcall()]
  1295  ep, ep->com.cm_id, ep->hwtid);  [in peer_close_upcall()]
  1296  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  [in peer_close_upcall()]
  1309  if (ep->com.cm_id) {  [in peer_abort_upcall()]
  [all …]
/linux/drivers/infiniband/ulp/ipoib/

ipoib_cm.c
    80  static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
   274  struct ib_cm_id *cm_id, struct ib_qp *qp,  [in ipoib_cm_modify_rx_qp(), argument]
   282  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  [in ipoib_cm_modify_rx_qp()]
   293  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  [in ipoib_cm_modify_rx_qp()]
   314  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  [in ipoib_cm_modify_rx_qp()]
   347  static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,  [in ipoib_cm_nonsrq_init_rx(), argument]
   375  ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);  [in ipoib_cm_nonsrq_init_rx()]
   420  static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,  [in ipoib_cm_send_rep(), argument]
   439  return ib_send_cm_rep(cm_id, &rep);  [in ipoib_cm_send_rep()]
   442  static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,  [in ipoib_cm_req_handler(), argument]
   [all …]
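
The three ib_cm_init_qp_attr() hits in ipoib_cm_modify_rx_qp() are one loop
iteration per QP state: for each target state the CM is asked which
attributes (and mask) apply, and they are then applied to the QP. A sketch of
one step of that staged transition (error handling trimmed; variable
declarations added for context):

	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	/* repeated with IB_QPS_RTR and, per the hits above, IB_QPS_RTS */
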
/linux/drivers/infiniband/ulp/rtrs/

rtrs-srv.c
   791  static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
  1558  rdma_disconnect(con->c.cm_id);  [in rtrs_srv_close_work()]
  1584  rdma_destroy_id(con->c.cm_id);  [in rtrs_srv_close_work()]
  1600  struct rdma_cm_id *cm_id)  [in rtrs_rdma_do_accept(), argument]
  1624  err = rdma_accept(cm_id, &param);  [in rtrs_rdma_do_accept()]
  1631  static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)  [in rtrs_rdma_do_reject(), argument]
  1642  err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);  [in rtrs_rdma_do_reject()]
  1664  struct rdma_cm_id *cm_id,  [in create_con(), argument]
  1682  con->c.cm_id = cm_id;  [in create_con()]
  1735  cm_id->context = &con->c;  [in create_con()]
  [all …]
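
rtrs-srv.c shows the server's accept-or-reject fork: accept with the
negotiated conn_param, or reject and smuggle an errno back to the client as
consumer-defined private data. A sketch of that fork (the function name and
the message encoding are illustrative; the rtrs message layout differs):

	static int do_accept_sketch(struct rdma_cm_id *cm_id,
				    struct rdma_conn_param *param,
				    int errno_val)
	{
		__le16 msg;

		if (!errno_val)
			return rdma_accept(cm_id, param);

		/* Carry the errno in the REJ private data. */
		msg = cpu_to_le16((u16)errno_val);
		return rdma_reject(cm_id, &msg, sizeof(msg),
				   IB_CM_REJ_CONSUMER_DEFINED);
	}
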
/linux/drivers/infiniband/ulp/isert/

ib_isert.c
    92  rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);  [in isert_qp_event_callback()]
   437  isert_conn->cm_id = cma_id;  [in isert_connect_request()]
   494  if (isert_conn->cm_id &&  [in isert_connect_release()]
   496  rdma_destroy_id(isert_conn->cm_id);  [in isert_connect_release()]
   554  struct isert_np *isert_np = isert_conn->cm_id->context;  [in isert_handle_unbound_conn()]
   592  err = rdma_disconnect(isert_conn->cm_id);  [in isert_conn_terminate()]
   607  isert_np->cm_id = NULL;  [in isert_np_cma_handler()]
   610  isert_np->cm_id = isert_setup_id(isert_np);  [in isert_np_cma_handler()]
   611  if (IS_ERR(isert_np->cm_id)) {  [in isert_np_cma_handler()]
   613  isert_np, PTR_ERR(isert_np->cm_id));  [in isert_np_cma_handler()]
   [all …]
/linux/drivers/infiniband/ulp/srpt/

ib_srpt.h
   297  struct ib_cm_id *cm_id;  [member]
   300  struct rdma_cm_id *cm_id;  [member]
   458  struct ib_cm_id *cm_id;  [member]
/linux/drivers/infiniband/sw/siw/

siw_cm.c
   367  id = cep->listen_cep->cm_id;  [in siw_cm_upcall()]
   369  id = cep->cm_id;  [in siw_cm_upcall()]
   412  if (!cep->cm_id)  [in siw_free_cm_id()]
   415  cep->cm_id->rem_ref(cep->cm_id);  [in siw_free_cm_id()]
   416  cep->cm_id = NULL;  [in siw_free_cm_id()]
   459  if (cep->cm_id) {  [in siw_qp_cm_drop()]
  1125  if (cep->cm_id)  [in siw_cm_work_handler()]
  1132  if (cep->cm_id) {  [in siw_cm_work_handler()]
  1184  if (cep->cm_id)  [in siw_cm_work_handler()]
  1235  if (cep->cm_id) {  [in siw_cm_work_handler()]
  [all …]